repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (categorical, 15 values) |
---|---|---|---|---|---|
h2oai/h2o | py/jprobe.py | 8 | 20674 | #!/usr/bin/python
import random, jenkinsapi, getpass, re, os, argparse, shutil, json, logging, sys
import string
from jenkinsapi.jenkins import Jenkins
# only used when we wanted to see what objects were available (below)
# from see import see
DO_LAST_GOOD = True
# use the env variables to force jenkinsapi through the proxy, but clear them afterwards
# to avoid breaking other python code that uses requests
def clear_env():
# need to set environment variables for proxy server if going to sm box
# or clear them if not!
if os.environ.get('HTTPS_PROXY'):
print "removing HTTPS_PROXY os env variable so requests won't use it"
del os.environ['HTTPS_PROXY']
if os.environ.get('HTTP_PROXY'):
print "removing HTTP_PROXY os env variable so requests won't use it"
del os.environ['HTTP_PROXY']
import sys
def my_hook(type, value, traceback):
print 'hooked the exception so we can clear env variables'
clear_env()
print 'Type:', type
print 'Value:', value
print 'Traceback:', traceback
raise Exception
sys.excepthook = my_hook
parse = argparse.ArgumentParser()
group = parse.add_mutually_exclusive_group()
group.add_argument('-e', help="job number from a list of ec2 known jobs", type=int, action='store', default=None)
group.add_argument('-x', help="job number from a list of 164 known jobs", type=int, action='store', default=None)
group.add_argument('-s', help="job number from a list of 174 known jobs", type=int, action='store', default=None)
group.add_argument('-s2', help="job number from a list of 190 known jobs", type=int, action='store', default=None)
group.add_argument('-j', '--jobname', help="jobname. Correct url is found", action='store', default=None)
parse.add_argument('-l', '--logging', help="turn on logging.DEBUG msgs to see allUrls used", action='store_true')
parse.add_argument('-v', '--verbose', help="dump the last N stdout from the failed jobs", action='store_true')
group.add_argument('-c', help="do a hardwired special job copy between jenkins", type=int, action='store', default=None)
args = parse.parse_args()
# can refer to this by zero-based index with -n 0 or -n 1 etc
# or by job name with -j h2o_master_test
allowedJobsX = [
'h2o_master_test',
'h2o_release_tests',
'h2o_release_tests2',
'h2o_release_tests_164',
'h2o_release_tests_c10_only',
'h2o_perf_test',
'h2o_release_Runit',
]
allowedJobsE = [
'h2o.tests.single.jvm',
'h2o.multi.vm.temporary',
'h2o.tests.ec2.multi.jvm',
'h2o.tests.ec2.hosts',
]
allowedJobsS = [
'testdir_0xdata_only_1_of_1',
'testdir_hosts_1_of_1',
'testdir_kevin',
'testdir_multi_jvm_1_of_5',
'testdir_multi_jvm_2_of_5',
'testdir_multi_jvm_3_of_5',
'testdir_single_jvm_1_of_5',
'testdir_single_jvm_2_of_5',
'testdir_single_jvm_3_of_5',
'testdir_single_jvm_4_of_5',
'testdir_single_jvm_5_of_5',
]
allowedJobsS2 = [
'testdir_multi_jvm_1_of_5',
'testdir_multi_jvm_2_of_5',
'testdir_multi_jvm_3_of_5',
'testdir_single_jvm_1_of_5',
'testdir_single_jvm_2_of_5',
'testdir_single_jvm_3_of_5',
'testdir_single_jvm_4_of_5',
'testdir_single_jvm_5_of_5',
]
allUrls = {
'ec2': 'http://test.0xdata.com',
'164': 'http://172.16.2.164:8080',
'184': 'http://172.16.2.184:8080',
}
all164Jobs = ['do all', 'h2o_master_test', 'h2o_master_test2', 'h2o_perf_test', 'h2o_private_json_vers_Runit', 'h2o_release_Runit', 'h2o_release_tests', 'h2o_release_tests2', 'h2o_release_tests_164', 'h2o_release_tests_c10_only', 'h2o_release_tests_cdh4', 'h2o_release_tests_cdh4', 'h2o_release_tests_cdh4_yarn', 'h2o_release_tests_cdh5', 'h2o_release_tests_cdh5_yarn', 'h2o_release_tests_hdp1.3', 'h2o_release_tests_hdp2.0.6', 'h2o_release_tests_mapr', 'selenium12']
allEc2Jobs = ['generic.h2o.build.branch', 'h2o.branch.api-dev', 'h2o.branch.cliffc-drf', 'h2o.branch.hilbert', 'h2o.branch.jobs', 'h2o.branch.jobs1', 'h2o.branch.json_versioning', 'h2o.branch.rel-ito', 'h2o.build', 'h2o.build.api-dev', 'h2o.build.gauss', 'h2o.build.godel', 'h2o.build.h2oscala', 'h2o.build.hilbert', 'h2o.build.jobs', 'h2o.build.master', 'h2o.build.rel-ito', 'h2o.build.rel-ivory', 'h2o.build.rel-iwasawa', 'h2o.build.rel-jacobi', 'h2o.build.rel-jordan', 'h2o.build.rest_api_versioning', 'h2o.build.ux-client', 'h2o.build.va_defaults_renamed', 'h2o.clone', 'h2o.datasets', 'h2o.download.latest', 'h2o.ec2.start', 'h2o.ec2.stop', 'h2o.findbugs', 'h2o.multi.vm.temporary', 'h2o.multi.vm.temporary.cliffc-no-limits', 'h2o.nightly', 'h2o.nightly.1', 'h2o.nightly.cliffc-lock', 'h2o.nightly.ec2', 'h2o.nightly.ec2.cliffc-no-limits', 'h2o.nightly.ec2.erdos', 'h2o.nightly.ec2.hilbert', 'h2o.nightly.ec2.rel-ito', 'h2o.nightly.ec2.rel-jacobi', 'h2o.nightly.ec2.rel-jordan', 'h2o.nightly.fourier', 'h2o.nightly.godel', 'h2o.nightly.multi.vm', 'h2o.nightly.rel-ivory', 'h2o.nightly.rel-iwasawa', 'h2o.nightly.rel-jacobi', 'h2o.nightly.rel-jordan', 'h2o.nightly.va_defaults_renamed', 'h2o.post.push', 'h2o.private.nightly', 'h2o.tests.ec2', 'h2o.tests.ec2.hosts', 'h2o.tests.ec2.multi.jvm', 'h2o.tests.ec2.multi.jvm.fvec', 'h2o.tests.golden', 'h2o.tests.junit', 'h2o.tests.multi.jvm', 'h2o.tests.multi.jvm.fvec', 'h2o.tests.single.jvm', 'h2o.tests.single.jvm.fvec', 'h2o.tests.test']
all184Jobs = [
'testdir_0xdata_only_1_of_1',
'testdir_hosts_1_of_1',
'testdir_kevin',
'testdir_multi_jvm_1_of_5',
'testdir_multi_jvm_2_of_5',
'testdir_multi_jvm_3_of_5',
'testdir_single_jvm_1_of_5',
'testdir_single_jvm_2_of_5',
'testdir_single_jvm_3_of_5',
'testdir_single_jvm_4_of_5',
'testdir_single_jvm_5_of_5',
]
all190Jobs = [
'testdir_multi_jvm_1_of_5',
'testdir_multi_jvm_2_of_5',
'testdir_multi_jvm_3_of_5',
'testdir_single_jvm_1_of_5',
'testdir_single_jvm_2_of_5',
'testdir_single_jvm_3_of_5',
'testdir_single_jvm_4_of_5',
'testdir_single_jvm_5_of_5',
]
# jenkinsapi:
# This library wraps up that interface as more
# conventional python objects in order to make many
# Jenkins oriented tasks easier to automate.
# http://pythonhosted.org//jenkinsapi
# https://pypi.python.org/pypi/jenkinsapi
# Project source code: github: https://github.com/salimfadhley/jenkinsapi
# Project documentation: https://jenkinsapi.readthedocs.org/en/latest/
#************************************************
if args.logging:
logging.basicConfig(level=logging.DEBUG)
if args.jobname and (args.e or args.x or args.s or args.s2):
raise Exception("Don't use both -j and -x or -e or -s args or -s2 args")
# default ec2 0
jobname = None
if args.e is not None:
if args.e<0 or args.e>(len(allowedJobsE)-1):
raise Exception("ec2 job number %s is outside allowed range: 0-%s" % \
(args.e, len(allowedJobsE)-1))
jobname = allowedJobsE[args.e]
if args.x is not None:
if args.x<0 or args.x>(len(allowedJobsX)-1):
raise Exception("0xdata job number %s is outside allowed range: 0-%s" % \
(args.x, len(allowedJobsX)-1))
jobname = allowedJobsX[args.x]
if args.s is not None:
if args.s<0 or args.s>(len(allowedJobsS)-1):
raise Exception("sm job number %s is outside allowed range: 0-%s" % \
(args.s, len(allowedJobsS)-1))
jobname = allowedJobsS[args.s]
if args.s2 is not None:
if args.s2<0 or args.s2>(len(allowedJobsS2)-1):
raise Exception("sm job number %s is outside allowed range: 0-%s" % \
(args.s2, len(allowedJobsS2)-1))
jobname = allowedJobsS2[args.s2]
if args.jobname:
if args.jobname not in (allowedJobsE + allowedJobsX + allowedJobsS + allowedJobsS2):
raise Exception("%s not in list of legal jobs" % args.jobname)
jobname = args.jobname
if not (args.jobname or args.x or args.e or args.s or args.s2):
# prompt the user
subtract = 0
prefix = "-e"
eDone = False
xDone = False
sDone = False
s2Done = False
while not jobname:
allAllowedJobs = allowedJobsE + allowedJobsX + allowedJobsS + allowedJobsS2
for j, job in enumerate(allAllowedJobs):
# first boundary
if not eDone and j==(subtract + len(allowedJobsE)):
subtract += len(allowedJobsE)
eDone = True
prefix = "-x"
# second boundary
if not xDone and j==(subtract + len(allowedJobsX)):
subtract += len(allowedJobsX)
xDone = True
prefix = "-s"
# third boundary
if not sDone and j==(subtract + len(allowedJobsS)):
subtract += len(allowedJobsS)
sDone = True
prefix = "-s2"
# fourth boundary
if not s2Done and j==(subtract + len(allowedJobsS2)):
subtract += len(allowedJobsS2)
prefix = "-??"
s2Done = True
print prefix, j-subtract, " [%s]: %s" % (j, job)
userInput = int(raw_input("Enter number (0 to %s): " % (len(allAllowedJobs)-1) ))
if userInput >= 0 and userInput < len(allAllowedJobs):
jobname = allAllowedJobs[userInput]
# defaults
if jobname in allEc2Jobs:
machine = 'ec2'
elif jobname in all164Jobs:
machine = '164'
elif jobname in all184Jobs:
# we're always building. current build doesn't have stats
DO_LAST_GOOD = True
machine = '184'
# print "Setting up proxy server for sm"
# os.environ['HTTP_PROXY'] = 'http://172.16.0.3:8888'
# os.environ['HTTPS_PROXY'] = 'https://172.16.0.3:8888'
elif jobname in all190Jobs:
DO_LAST_GOOD = True
machine = '190'
else:
raise Exception("%s not in lists of known jobs" % jobname)
if machine not in allUrls:
raise Exception("%s not in allUrls dict" % machine)
jenkins_url = allUrls[machine]
print "machine:", machine
#************************************************
def clean_sandbox(LOG_DIR="sandbox"):
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
# it should have been removed, but on error it might still be there
if not os.path.exists(LOG_DIR):
os.mkdir(LOG_DIR)
return LOG_DIR
#************************************************
# get the username/pswd from files in the user's .ec2 dir (don't want cleartext here)
# prompt if doesn't exist
def login(machine='164'):
def getit(k):
if not os.path.isfile(k):
print "you probably should create this file to avoid typing %s" % k
return None
else:
with open(k) as f:
lines = f.read().splitlines()
return lines[0]
home = os.path.expanduser("~")
username = getit(home + '/.ec2/jenkins_user_' + machine)
pswd = getit(home + '/.ec2/jenkins_pswd_' + machine)
if not username:
username = raw_input("Username [%s]: " % getpass.getuser())
if not pswd:
pswd = getpass.getpass()
return username, pswd
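# Expected credential file layout (an assumption inferred from getit() above, not
# documented anywhere else): each file holds its secret on the first line, e.g.
#   ~/.ec2/jenkins_user_164   -> jenkins username for the '164' machine
#   ~/.ec2/jenkins_pswd_164   -> jenkins password for the '164' machine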
#************************************************8
username, password = login(machine)
LOG_DIR = clean_sandbox("sandbox")
def dump_json(j):
return json.dumps(j, sort_keys=True, indent=2)
#************************************************8
J = Jenkins(jenkins_url, username, password)
print "\nCurrent jobs available at %s" % jenkins_url
print J.keys()
print "\nChecking this job:", J[jobname]
job = J[jobname]
print "\nGetting %s job config" % jobname
print job.get_config()
print "\nlast good build:"
lgb = job.get_last_good_build()
print "\nlast good build revision:"
print lgb.get_revision()
from jenkinsapi.api import get_latest_complete_build
from jenkinsapi.api import get_latest_test_results
# print "************************HELLO****************************"
# print get_latest_complete_build(jenkins_url, jobname, username=username, password=password)
# print "************************HELLO****************************"
# get_latest_test_results(jenkinsurl, jobname, username=None, password=None)[source]
# search_artifact_by_regexp.py
if 1==0:
expr = "commands.log"
print("testing search_artifact_by_regexp with expression %s") % expr
from jenkinsapi.api import search_artifact_by_regexp
artifact_regexp = re.compile(expr) # A file name I want.
result = search_artifact_by_regexp(jenkins_url, jobname, artifact_regexp)
print("tested search_artifact_by_regexp", (repr(result)))
# print "last_stable_buildnumber", job.get_last_stable_buildnumber()
print "last_good_buildnumber", job.get_last_good_buildnumber()
# print "last_failed_buildnumber", job.get_last_failed_buildnumber()
print "last_buildnumber", job.get_last_buildnumber()
if DO_LAST_GOOD:
print "Using last_good_buildnumber %s for result set" % job.get_last_good_buildnumber()
build = job.get_build(job.get_last_good_buildnumber())
else:
print "Using last_buildnumber %s for result set" % job.get_last_buildnumber()
build = job.get_build(job.get_last_buildnumber())
# print out info about the job
# print "build:", build
# mjn = build.get_master_job_name()
# print "mjn:", mjn
af = build.get_artifacts()
dict_af = build.get_artifact_dict()
# for looking at object in json
# import h2o_util
# s = h2o_util.json_repr(dict_af, curr_depth=0, max_depth=12)
# print dump_json(s)
buildstatus = build.get_status()
print "build get_status", buildstatus
buildname = build.name
print "build name", buildname
buildnumber = build.get_number()
print "build number", buildnumber
buildrevision = build.get_revision()
print "build revision", buildrevision
buildbranch = build.get_revision_branch()
print "build revision branch", buildbranch
buildduration = build.get_duration()
print "build duration", buildduration
buildupstream = build.get_upstream_job_name()
print "build upstream job name", buildupstream
buildgood = build.is_good()
print "build is_good", buildgood
buildtimestamp = build.get_timestamp()
print "build timestamp", buildtimestamp
consoleTxt = open(LOG_DIR + '/console.txt', "a")
print "getting build console (how to buffer this write?)"
print "probably better to figure how to save it as file"
c = build.get_console()
consoleTxt.write(c)
consoleTxt.close()
print "build has result set", build.has_resultset()
print "build get result set"
rs = build.get_resultset()
print "build result set name", rs.name
# print "build result set items", rs.items()
print #****************************************
# print dump_json(item)
# print "build result set keys", rs.keys()
aTxt = open(LOG_DIR + '/artifacts.txt', "a")
# have just a json string in the result set?
# rs.items is a generator?
#****************************************************************************
PRINTALL = False
# keep count of status counts
# 2014-03-19 07:26:15+00:00
# buildtimestamp is a datetime object
## see(buildtimestamp)
t = buildtimestamp
# hour minute
hm = "%s_%s" % (t.hour, t.minute)
# hour minute second
hms = "%s_%s" % (hm, t.second)
failName = "%s_%s_%s_%s%s" % ("fail", jobname, buildnumber, hm, ".txt")
print "failName:", failName
regressName = "%s_%s_%s_%s%s" % ("regress", jobname, buildnumber, hm, ".txt")
print "regressName:", regressName
fixedName = "%s_%s_%s_%s%s" % ("fixed", jobname, buildnumber, hm, ".txt")
print "fixedName:", fixedName
stats = {}
def fprint (*args):
# emulate printing each as string, then join with spaces
s = ["%s" % a for a in args]
line = " ".join(s)
fTxt.write(line + "\n")
print line
def printStuff():
e1 = "\n******************************************************************************"
e2 = "%s %s %s" % (i, jobname, v)
fprint(e1)
fprint(e2)
# print "\n", k, "\n"
# print "\n", v, "\n"
# to see what you can get
# print see(v)
# print dir(v)
# print vars(v)
# .age .className .duration .errorDetails .errorStackTrace .failedSince
# .identifier() .name .skipped .skippedMessage .status .stderr .stdout
fprint (i, "v.duration", v.duration)
fprint (i, "v.errorStackTrace", v.errorStackTrace)
fprint (i, "v.failedSince", v.failedSince)
if args.verbose:
fprint (i, "v.stderr", v.stderr)
# lines = v.stdout.splitlines()
# keep newlines in the list elements
if not v.stdout:
fprint ("v.stdout is empty")
else:
fprint ("len(v.stdout):", len(v.stdout))
# have to fix the \n and \tat in the strings
stdout = v.stdout
# json string has the actual '\' and 'n' or 'tat' chars
stdout = string.replace(stdout,'\\n', '\n');
stdout = string.replace(stdout,'\\tat', '\t');
# don't need double newlines
stdout = string.replace(stdout,'\n\n', '\n');
lineList = stdout.splitlines()
fprint ("len(lineList):", len(lineList))
num = min(20, len(lineList))
if num!=0:
# print i, "Last %s lineList of stdout %s" % (num, "\n".join(lineList[-num]))
fprint (i, "Last %s lineList of stdout\n" % num)
fprint ("\n".join(lineList[-num:]))
else:
fprint ("v.stdout is empty")
#******************************************************
for i, (k, v) in enumerate(rs.items()):
if v.status in stats:
stats[v.status] += 1
else:
stats[v.status] = 1
# print rs.name
e1 = "\n******************************************************************************"
e2 = "%s %s %s" % (i, jobname, v)
aTxt.write(e1+"\n")
aTxt.write(e2+"\n")
# only if not PASSED
if v.status == 'FAILED':
fTxt = open(LOG_DIR + "/" + failName, "a")
printStuff()
fTxt.close()
if v.status == 'REGRESSION':
fTxt = open(LOG_DIR + "/" + regressName, "a")
printStuff()
fTxt.close()
if v.status == 'FIXED':
fTxt = open(LOG_DIR + "/" + fixedName, "a")
printStuff()
fTxt.close()
if PRINTALL:
fprint (i, "k", k)
fprint (i, "v", v)
fprint (i, "v.errorDetails", v.errorDetails)
fprint (i, "v.age", v.age)
fprint (i, "v.className", v.className)
fprint (i, "v.identifier()", v.identifier())
fprint (i, "v.name", v.name)
fprint (i, "v.skipped", v.age)
fprint (i, "v.skippedMessage", v.skippedMessage)
fprint (i, "v.status", v.status)
fprint (i, "v.stdout", v.stdout)
#****************************************************************************
# print "dict_af", dict_af
if 1==1:
for a in af:
# print "a.keys():", a.keys()
# txt = a.get_data()
e = "%s %s %s %s\n" % ("#", a.filename, a.url, "########### artifact saved ####################")
# print e,
aTxt.write(e+"\n")
# get the h2o output from the runit runs
# a.save_to_dir(LOG_DIR)
consoleTxt.close()
# print txt
# a.save_to_dir('./sandbox')
# print txt[0]
aTxt.close()
print "#***********************************************"
print "Build:", buildname
print buildtimestamp
print "Status:", buildstatus
if buildgood:
print "Build is good"
else:
print "Build is bad"
print "Build number", buildnumber
# print buildrevision
print buildbranch
print "Duration", buildduration
print "Upstream job", buildupstream
print "Test summary"
for s in stats:
print s, stats[s]
# rename the sandbox
dirname = "%s_%s_%s_%s" % ("sandbox", jobname, buildnumber, hm)
if os.path.exists(dirname):
shutil.rmtree(dirname)
os.rename(LOG_DIR, dirname)
print "Results are in", dirname
print "#***********************************************"
clear_env()
# from jenkins.py, we can copy jobs?
# def jobs(self):
# def get_jobs(self):
# def get_jobs_info(self):
# def get_job(self, jobname):
# def has_job(self, jobname):
# def create_job(self, jobname, config_):
# Create a job
# :param jobname: name of new job, str
# :param config: configuration of new job, xml
# :return: new Job obj
# def copy_job(self, jobname, newjobname):
# def build_job(self, jobname, params=None):
# Invoke a build by job name
# :param jobname: name of exist job, str
# :param params: the job params, dict
# :return: none
# def delete_job(self, jobname):
# def rename_job(self, jobname, newjobname):
# load config calls get_config?
# def load_config(self):
# def get_config(self):
# '''Returns the config.xml from the job'''
# def get_config_xml_url(self):
# def update_config(self, config):
# def create(self, job_name, config):
# Create a job
# :param jobname: name of new job, str
# :param config: configuration of new job, xml
# :return: new Job obj
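# A minimal sketch (an assumption, not exercised by this script) of copying a job
# config between the two jenkins servers in allUrls, using the methods listed above:
#   src = Jenkins(allUrls['164'], username, password)
#   dst = Jenkins(allUrls['184'], username, password)
#   config_xml = src[jobname].get_config()
#   if dst.has_job(jobname):
#       dst[jobname].update_config(config_xml)
#   else:
#       dst.create_job(jobname, config_xml)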
| apache-2.0 |
naritta/numpy | numpy/distutils/tests/test_npy_pkg_config.py | 70 | 3069 | from __future__ import division, absolute_import, print_function
import os
from tempfile import mkstemp
from numpy.testing import *
from numpy.distutils.npy_pkg_config import read_config, parse_flags
simple = """\
[meta]
Name = foo
Description = foo lib
Version = 0.1
[default]
cflags = -I/usr/include
libs = -L/usr/lib
"""
simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib',
'version': '0.1', 'name': 'foo'}
simple_variable = """\
[meta]
Name = foo
Description = foo lib
Version = 0.1
[variables]
prefix = /foo/bar
libdir = ${prefix}/lib
includedir = ${prefix}/include
[default]
cflags = -I${includedir}
libs = -L${libdir}
"""
simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib',
'version': '0.1', 'name': 'foo'}
class TestLibraryInfo(TestCase):
def test_simple(self):
fd, filename = mkstemp('foo.ini')
try:
pkg = os.path.splitext(filename)[0]
try:
os.write(fd, simple.encode('ascii'))
finally:
os.close(fd)
out = read_config(pkg)
self.assertTrue(out.cflags() == simple_d['cflags'])
self.assertTrue(out.libs() == simple_d['libflags'])
self.assertTrue(out.name == simple_d['name'])
self.assertTrue(out.version == simple_d['version'])
finally:
os.remove(filename)
def test_simple_variable(self):
fd, filename = mkstemp('foo.ini')
try:
pkg = os.path.splitext(filename)[0]
try:
os.write(fd, simple_variable.encode('ascii'))
finally:
os.close(fd)
out = read_config(pkg)
self.assertTrue(out.cflags() == simple_variable_d['cflags'])
self.assertTrue(out.libs() == simple_variable_d['libflags'])
self.assertTrue(out.name == simple_variable_d['name'])
self.assertTrue(out.version == simple_variable_d['version'])
out.vars['prefix'] = '/Users/david'
self.assertTrue(out.cflags() == '-I/Users/david/include')
finally:
os.remove(filename)
class TestParseFlags(TestCase):
def test_simple_cflags(self):
d = parse_flags("-I/usr/include")
self.assertTrue(d['include_dirs'] == ['/usr/include'])
d = parse_flags("-I/usr/include -DFOO")
self.assertTrue(d['include_dirs'] == ['/usr/include'])
self.assertTrue(d['macros'] == ['FOO'])
d = parse_flags("-I /usr/include -DFOO")
self.assertTrue(d['include_dirs'] == ['/usr/include'])
self.assertTrue(d['macros'] == ['FOO'])
def test_simple_lflags(self):
d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar")
self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
self.assertTrue(d['libraries'] == ['foo', 'bar'])
d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar")
self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
self.assertTrue(d['libraries'] == ['foo', 'bar'])
| bsd-3-clause |
johnewart/django-ldap-wizard | django_ldap_wizard/views.py | 1 | 2059 | from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext, Template, Context, loader
from django.utils.safestring import mark_safe
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.models import User, Group
import django.contrib.auth
import ldap
from django.contrib.auth.decorators import login_required
from django.template import RequestContext, Context, loader
from django.db import models
def setup(request):
t = loader.get_template('django_ldap_wizard/setup.html')
successful_connect = False
if request.GET:
try:
uri = request.GET.get("ldap_url", "")
bind_dn = request.GET.get('bind_dn', "")
bind_pw = request.GET.get('bind_pw', "")
base_dn = request.GET.get('base_dn', "")
con = ldap.initialize(uri)
con.simple_bind_s( bind_dn, bind_pw )
message = "Successfully tested your connection settings."
successful_connect = True
except ldap.SERVER_DOWN:
message = "Unable to contact LDAP server -- perhaps you specified the wrong URI or the server is not accepting connections"
except ldap.INVALID_CREDENTIALS:
message = "Unable to authenticate using those credentials. Please double check them!"
except ldap.LDAPError, e:
if type(e.message) == dict and e.message.has_key('desc'):
message = e.message['desc']
else:
message = "Invalid input data, check your settings"
else:
uri = ""
bind_dn = ""
bind_pw = ""
base_dn = ""
message = ""
ctx = {
"uri": uri,
"bind_dn": bind_dn,
"bind_pw": bind_pw,
"base_dn": base_dn,
"message": message,
"success": successful_connect
}
c = RequestContext(request, ctx)
return HttpResponse(t.render(c))
| bsd-3-clause |
PauliusLabanauskis/AlgorithmsDataStructures | algo_pathfinding/main.py | 1 | 1083 | from graph_input import read_graph
def choose_node(reachable, explored):
for node in reachable:
if node not in explored:
return node
def find_path(start_node, goal_node, graph):
reachable = [start_node]
explored = set()
previous = {start_node: None}
while len(reachable) > 0:
cur_node = choose_node(reachable, explored)
if cur_node == goal_node:
return build_path(goal_node, previous)
reachable.remove(cur_node)
explored.add(cur_node)
new_reachable = graph[cur_node] - explored
for adjacent in new_reachable:
if adjacent not in reachable:
previous[adjacent] = cur_node
reachable.append(adjacent)
def build_path(to_node, previous_nodes):
path = []
while to_node != None:
path.append(to_node)
to_node = previous_nodes[to_node]
return path
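# A small worked example (assumed input shape, inferred from graph[cur_node] - explored
# above: each node maps to a set of adjacent nodes):
#   graph = {'A': {'B', 'C'}, 'B': {'T'}, 'C': {'B'}, 'T': set()}
#   find_path('A', 'T', graph)   # -> ['T', 'B', 'A'], i.e. goal back to start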
def main():
graph = read_graph('sample_data/sample_1.txt')
path = find_path('A', 'T', graph)
print path
if __name__ == '__main__':
main() | unlicense |
sgml/popcorn_maker | vendor-local/lib/python/whoosh/lang/porter2.py | 117 | 8314 | """An implementation of the Porter2 stemming algorithm.
See http://snowball.tartarus.org/algorithms/english/stemmer.html
Adapted from pyporter2 by Michael Dirolf.
This algorithm is more correct but (at least in this implementation)
several times slower than the original porter algorithm as implemented
in stemming.porter.
"""
import re
r_exp = re.compile(r"[^aeiouy]*[aeiouy]+[^aeiouy](\w*)")
ewss_exp1 = re.compile(r"^[aeiouy][^aeiouy]$")
ewss_exp2 = re.compile(r".*[^aeiouy][aeiouy][^aeiouywxY]$")
ccy_exp = re.compile(r"([aeiouy])y")
s1a_exp = re.compile(r"[aeiouy].")
s1b_exp = re.compile(r"[aeiouy]")
def get_r1(word):
# exceptional forms
if word.startswith('gener') or word.startswith('arsen'):
return 5
if word.startswith('commun'):
return 6
# normal form
match = r_exp.match(word)
if match:
return match.start(1)
return len(word)
def get_r2(word):
match = r_exp.match(word, get_r1(word))
if match:
return match.start(1)
return len(word)
def ends_with_short_syllable(word):
if len(word) == 2:
if ewss_exp1.match(word):
return True
if ewss_exp2.match(word):
return True
return False
def is_short_word(word):
if ends_with_short_syllable(word):
if get_r1(word) == len(word):
return True
return False
def remove_initial_apostrophe(word):
if word.startswith("'"):
return word[1:]
return word
def capitalize_consonant_ys(word):
if word.startswith('y'):
word = 'Y' + word[1:]
return ccy_exp.sub('\g<1>Y', word)
def step_0(word):
if word.endswith("'s'"):
return word[:-3]
if word.endswith("'s"):
return word[:-2]
if word.endswith("'"):
return word[:-1]
return word
def step_1a(word):
if word.endswith('sses'):
return word[:-4] + 'ss'
if word.endswith('ied') or word.endswith('ies'):
if len(word) > 4:
return word[:-3] + 'i'
else:
return word[:-3] + 'ie'
if word.endswith('us') or word.endswith('ss'):
return word
if word.endswith('s'):
preceding = word[:-1]
if s1a_exp.search(preceding):
return preceding
return word
return word
doubles = ('bb', 'dd', 'ff', 'gg', 'mm', 'nn', 'pp', 'rr', 'tt')
def ends_with_double(word):
for double in doubles:
if word.endswith(double):
return True
return False
def step_1b_helper(word):
if word.endswith('at') or word.endswith('bl') or word.endswith('iz'):
return word + 'e'
if ends_with_double(word):
return word[:-1]
if is_short_word(word):
return word + 'e'
return word
s1b_suffixes = ('ed', 'edly', 'ing', 'ingly')
def step_1b(word, r1):
if word.endswith('eedly'):
if len(word) - 5 >= r1:
return word[:-3]
return word
if word.endswith('eed'):
if len(word) - 3 >= r1:
return word[:-1]
return word
for suffix in s1b_suffixes:
if word.endswith(suffix):
preceding = word[:-len(suffix)]
if s1b_exp.search(preceding):
return step_1b_helper(preceding)
return word
return word
def step_1c(word):
if word.endswith('y') or word.endswith('Y') and len(word) > 1:
if word[-2] not in 'aeiouy':
if len(word) > 2:
return word[:-1] + 'i'
return word
def step_2_helper(word, r1, end, repl, prev):
if word.endswith(end):
if len(word) - len(end) >= r1:
if prev == []:
return word[:-len(end)] + repl
for p in prev:
if word[:-len(end)].endswith(p):
return word[:-len(end)] + repl
return word
return None
s2_triples = (('ization', 'ize', []),
('ational', 'ate', []),
('fulness', 'ful', []),
('ousness', 'ous', []),
('iveness', 'ive', []),
('tional', 'tion', []),
('biliti', 'ble', []),
('lessli', 'less', []),
('entli', 'ent', []),
('ation', 'ate', []),
('alism', 'al', []),
('aliti', 'al', []),
('ousli', 'ous', []),
('iviti', 'ive', []),
('fulli', 'ful', []),
('enci', 'ence', []),
('anci', 'ance', []),
('abli', 'able', []),
('izer', 'ize', []),
('ator', 'ate', []),
('alli', 'al', []),
('bli', 'ble', []),
('ogi', 'og', ['l']),
('li', '', ['c', 'd', 'e', 'g', 'h', 'k', 'm', 'n', 'r', 't']))
def step_2(word, r1):
for trip in s2_triples:
attempt = step_2_helper(word, r1, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
def step_3_helper(word, r1, r2, end, repl, r2_necessary):
if word.endswith(end):
if len(word) - len(end) >= r1:
if not r2_necessary:
return word[:-len(end)] + repl
else:
if len(word) - len(end) >= r2:
return word[:-len(end)] + repl
return word
return None
s3_triples = (('ational', 'ate', False),
('tional', 'tion', False),
('alize', 'al', False),
('icate', 'ic', False),
('iciti', 'ic', False),
('ative', '', True),
('ical', 'ic', False),
('ness', '', False),
('ful', '', False))
def step_3(word, r1, r2):
for trip in s3_triples:
attempt = step_3_helper(word, r1, r2, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
s4_delete_list = ('al', 'ance', 'ence', 'er', 'ic', 'able', 'ible', 'ant', 'ement',
'ment', 'ent', 'ism', 'ate', 'iti', 'ous', 'ive', 'ize')
def step_4(word, r2):
for end in s4_delete_list:
if word.endswith(end):
if len(word) - len(end) >= r2:
return word[:-len(end)]
return word
if word.endswith('sion') or word.endswith('tion'):
if len(word) - 3 >= r2:
return word[:-3]
return word
def step_5(word, r1, r2):
if word.endswith('l'):
if len(word) - 1 >= r2 and word[-2] == 'l':
return word[:-1]
return word
if word.endswith('e'):
if len(word) - 1 >= r2:
return word[:-1]
if len(word) - 1 >= r1 and not ends_with_short_syllable(word[:-1]):
return word[:-1]
return word
def normalize_ys(word):
return word.replace('Y', 'y')
exceptional_forms = {'skis': 'ski',
'skies': 'sky',
'dying': 'die',
'lying': 'lie',
'tying': 'tie',
'idly': 'idl',
'gently': 'gentl',
'ugly': 'ugli',
'early': 'earli',
'only': 'onli',
'singly': 'singl',
'sky': 'sky',
'news': 'news',
'howe': 'howe',
'atlas': 'atlas',
'cosmos': 'cosmos',
'bias': 'bias',
'andes': 'andes'}
exceptional_early_exit_post_1a = frozenset(['inning', 'outing', 'canning', 'herring',
'earring', 'proceed', 'exceed', 'succeed'])
def stem(word):
if len(word) <= 2:
return word
word = remove_initial_apostrophe(word)
# handle some exceptional forms
if word in exceptional_forms:
return exceptional_forms[word]
word = capitalize_consonant_ys(word)
r1 = get_r1(word)
r2 = get_r2(word)
word = step_0(word)
word = step_1a(word)
# handle some more exceptional forms
if word in exceptional_early_exit_post_1a:
return word
word = step_1b(word, r1)
word = step_1c(word)
word = step_2(word, r1)
word = step_3(word, r1, r2)
word = step_4(word, r2)
word = step_5(word, r1, r2)
word = normalize_ys(word)
return word
| bsd-3-clause |
lemonad/methodiki | methodiki/methods/forms.py | 1 | 1932 | # -*- coding: utf-8 -*-
from django.forms import ModelForm, TextInput
from django.utils.translation import ugettext_lazy as _
from markitup.widgets import MarkItUpWidget
from taggit.forms import TagWidget
from common.forms import ModelFormRequestUser
from models import Method, MethodBonus
class MethodForm(ModelFormRequestUser):
""" Form for adding and editing methods """
class Meta:
model = Method
fields = ('title', 'description', 'tags', 'editor_comment')
widgets = {
'title': TextInput(attrs={'class': 'span-18 last input'}),
'description': MarkItUpWidget(auto_preview=False,
attrs={'class':
'span-18 last input'}),
'editor_comment': TextInput(attrs={'class':
'span-18 last input'}),
'tags': TagWidget(attrs={'class': 'span-18 last input'}),
}
def __init__(self, request, *args, **kwargs):
super(MethodForm, self).__init__(request, *args, **kwargs)
self.last_edited_by = request.user
def save(self, commit=True):
obj = super(MethodForm, self).save(commit=False)
obj.last_edited_by = self.user
if commit:
obj.save()
self.save_m2m() # Be careful with ModelForms and commit=False
return obj
class MethodBonusForm(ModelFormRequestUser):
""" Form for adding and editing method bonus' """
class Meta:
model = MethodBonus
fields = ('description',)
widgets = {
'description': MarkItUpWidget(auto_preview=True,
attrs={'class':
'span-18 last input'}),
}
def __init__(self, request, *args, **kwargs):
super(MethodBonusForm, self).__init__(request, *args, **kwargs)
| mit |
tersmitten/ansible | lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration.py | 12 | 8115 | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mariadbconfiguration
version_added: "2.8"
short_description: Manage Configuration instance.
description:
- Create, update and delete instance of Configuration.
options:
resource_group:
description:
- The name of the resource group that contains the resource.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the server configuration.
required: True
value:
description:
- Value of the configuration.
state:
description:
- Assert the state of the MariaDB configuration. Use C(present) to update setting, or
C(absent) to reset to default value.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
- "Matti Ranta (@techknowlogick)"
'''
EXAMPLES = '''
- name: Update MariaDB server setting
azure_rm_mariadbconfiguration:
resource_group: myResourceGroup
server_name: myServer
name: event_scheduler
value: "ON"
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myServer/confi
gurations/event_scheduler"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from azure.mgmt.rdbms.mysql import MariaDBManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMMariaDbConfiguration(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
value=dict(
type='str'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.server_name = None
self.name = None
self.value = None
self.results = dict(changed=False)
self.state = None
self.to_do = Actions.NoAction
super(AzureRMMariaDbConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=False)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
old_response = None
response = None
old_response = self.get_configuration()
if not old_response:
self.log("Configuration instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("Configuration instance already exists")
if self.state == 'absent' and old_response['source'] == 'user-override':
self.to_do = Actions.Delete
elif self.state == 'present':
self.log("Need to check if Configuration instance has to be deleted or may be updated")
if self.value != old_response.get('value'):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the Configuration instance")
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_configuration()
self.results['changed'] = True
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("Configuration instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_configuration()
else:
self.log("Configuration instance unchanged")
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
return self.results
def create_update_configuration(self):
self.log("Creating / Updating the Configuration instance {0}".format(self.name))
try:
response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
server_name=self.server_name,
configuration_name=self.name,
value=self.value,
source='user-override')
if isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the Configuration instance.')
self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
return response.as_dict()
def delete_configuration(self):
self.log("Deleting the Configuration instance {0}".format(self.name))
try:
response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
server_name=self.server_name,
configuration_name=self.name,
source='system-default')
except CloudError as e:
self.log('Error attempting to delete the Configuration instance.')
self.fail("Error deleting the Configuration instance: {0}".format(str(e)))
return True
def get_configuration(self):
self.log("Checking if the Configuration instance {0} is present".format(self.name))
found = False
try:
response = self.mariadb_client.configurations.get(resource_group_name=self.resource_group,
server_name=self.server_name,
configuration_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("Configuration instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the Configuration instance.')
if found is True:
return response.as_dict()
return False
def main():
"""Main execution"""
AzureRMMariaDbConfiguration()
if __name__ == '__main__':
main()
| gpl-3.0 |
axilleas/ansible | lib/ansible/module_utils/cloudstack.py | 118 | 13221 | # -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
class AnsibleCloudStack:
def __init__(self, module):
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
self.result = {
'changed': False,
}
self.module = module
self._connect()
self.domain = None
self.account = None
self.project = None
self.ip_address = None
self.zone = None
self.vm = None
self.os_type = None
self.hypervisor = None
self.capabilities = None
def _connect(self):
api_key = self.module.params.get('api_key')
api_secret = self.module.params.get('secret_key')
api_url = self.module.params.get('api_url')
api_http_method = self.module.params.get('api_http_method')
api_timeout = self.module.params.get('api_timeout')
if api_key and api_secret and api_url:
self.cs = CloudStack(
endpoint=api_url,
key=api_key,
secret=api_secret,
timeout=api_timeout,
method=api_http_method
)
else:
self.cs = CloudStack(**read_config())
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
if not value:
value = self.module.params.get(fallback_key)
return value
# TODO: for backward compatibility only, remove if not used anymore
def _has_changed(self, want_dict, current_dict, only_keys=None):
return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys)
def has_changed(self, want_dict, current_dict, only_keys=None):
for key, value in want_dict.iteritems():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
continue;
# Skip None values
if value is None:
continue;
if key in current_dict:
# API returns string for int in some cases, just to make sure
if isinstance(value, int):
current_dict[key] = int(current_dict[key])
elif isinstance(value, str):
current_dict[key] = str(current_dict[key])
# Only need to detect a single change, not every item
if value != current_dict[key]:
return True
return False
def _get_by_key(self, key=None, my_dict={}):
if key:
if key in my_dict:
return my_dict[key]
self.module.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
project = self.module.params.get('project')
if not project:
return None
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
projects = self.cs.listProjects(**args)
if projects:
for p in projects['project']:
if project.lower() in [ p['name'].lower(), p['id'] ]:
self.project = p
return self._get_by_key(key, self.project)
self.module.fail_json(msg="project '%s' not found" % project)
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.module.fail_json(msg="IP address param 'ip_address' is required")
args = {}
args['ipaddress'] = ip_address
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
ip_addresses = self.cs.listPublicIpAddresses(**args)
if not ip_addresses:
self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def get_vm(self, key=None):
if self.vm:
return self._get_by_key(key, self.vm)
vm = self.module.params.get('vm')
if not vm:
self.module.fail_json(msg="Virtual machine param 'vm' is required")
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['zoneid'] = self.get_zone(key='id')
vms = self.cs.listVirtualMachines(**args)
if vms:
for v in vms['virtualmachine']:
if vm in [ v['name'], v['displayname'], v['id'] ]:
self.vm = v
return self._get_by_key(key, self.vm)
self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
zone = self.module.params.get('zone')
zones = self.cs.listZones()
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]
return self._get_by_key(key, self.zone)
if zones:
for z in zones['zone']:
if zone in [ z['name'], z['id'] ]:
self.zone = z
return self._get_by_key(key, self.zone)
self.module.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
return self._get_by_key(key, self.os_type)
os_type = self.module.params.get('os_type')
if not os_type:
return None
os_types = self.cs.listOsTypes()
if os_types:
for o in os_types['ostype']:
if os_type in [ o['description'], o['id'] ]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.module.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get('hypervisor')
hypervisors = self.cs.listHypervisors()
# use the first hypervisor if no hypervisor param given
if not hypervisor:
self.hypervisor = hypervisors['hypervisor'][0]['name']
return self.hypervisor
for h in hypervisors['hypervisor']:
if hypervisor.lower() == h['name'].lower():
self.hypervisor = h['name']
return self.hypervisor
self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
def get_account(self, key=None):
if self.account:
return self._get_by_key(key, self.account)
account = self.module.params.get('account')
if not account:
return None
domain = self.module.params.get('domain')
if not domain:
self.module.fail_json(msg="Account must be specified with Domain")
args = {}
args['name'] = account
args['domainid'] = self.get_domain(key='id')
args['listall'] = True
accounts = self.cs.listAccounts(**args)
if accounts:
self.account = accounts['account'][0]
return self._get_by_key(key, self.account)
self.module.fail_json(msg="Account '%s' not found" % account)
def get_domain(self, key=None):
if self.domain:
return self._get_by_key(key, self.domain)
domain = self.module.params.get('domain')
if not domain:
return None
args = {}
args['listall'] = True
domains = self.cs.listDomains(**args)
if domains:
for d in domains['domain']:
if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]:
self.domain = d
return self._get_by_key(key, self.domain)
self.module.fail_json(msg="Domain '%s' not found" % domain)
def get_tags(self, resource=None):
existing_tags = self.cs.listTags(resourceid=resource['id'])
if existing_tags:
return existing_tags['tag']
return []
def _delete_tags(self, resource, resource_type, tags):
existing_tags = resource['tags']
tags_to_delete = []
for existing_tag in existing_tags:
if existing_tag['key'] in tags:
if existing_tag['value'] != tags[key]:
tags_to_delete.append(existing_tag)
else:
tags_to_delete.append(existing_tag)
if tags_to_delete:
self.result['changed'] = True
if not self.module.check_mode:
args = {}
args['resourceids'] = resource['id']
args['resourcetype'] = resource_type
args['tags'] = tags_to_delete
self.cs.deleteTags(**args)
def _create_tags(self, resource, resource_type, tags):
tags_to_create = []
for i, tag_entry in enumerate(tags):
tag = {
'key': tag_entry['key'],
'value': tag_entry['value'],
}
tags_to_create.append(tag)
if tags_to_create:
self.result['changed'] = True
if not self.module.check_mode:
args = {}
args['resourceids'] = resource['id']
args['resourcetype'] = resource_type
args['tags'] = tags_to_create
self.cs.createTags(**args)
def ensure_tags(self, resource, resource_type=None):
if not resource_type or not resource:
self.module.fail_json(msg="Error: Missing resource or resource_type for tags.")
if 'tags' in resource:
tags = self.module.params.get('tags')
if tags is not None:
self._delete_tags(resource, resource_type, tags)
self._create_tags(resource, resource_type, tags)
resource['tags'] = self.get_tags(resource)
return resource
def get_capabilities(self, key=None):
if self.capabilities:
return self._get_by_key(key, self.capabilities)
capabilities = self.cs.listCapabilities()
self.capabilities = capabilities['capability']
return self._get_by_key(key, self.capabilities)
# TODO: for backward compatibility only, remove if not used anymore
def _poll_job(self, job=None, key=None):
return self.poll_job(job=job, key=key)
def poll_job(self, job=None, key=None):
if 'jobid' in job:
while True:
res = self.cs.queryAsyncJobResult(jobid=job['jobid'])
if res['jobstatus'] != 0 and 'jobresult' in res:
if 'errortext' in res['jobresult']:
self.module.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
if key and key in res['jobresult']:
job = res['jobresult'][key]
break
time.sleep(2)
return job
| gpl-3.0 |
anthonyfok/frescobaldi | frescobaldi_app/gadgets/drag.py | 1 | 4442 | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2011 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Event-filtering objects and helper functions to drag things.
"""
from PyQt5.QtCore import QEvent, QFileInfo, QMimeData, QObject, Qt, QUrl
from PyQt5.QtGui import QDrag
from PyQt5.QtWidgets import QApplication, QFileIconProvider
class ComboDrag(QObject):
"""Enables dragging from a QComboBox.
Instantiate this with a QComboBox as parent to enable dragging the
current item.
By default, drags a filename got from the current index under the
Qt.EditRole. Change the role by changing the 'role' instance attribute.
"""
column = 0
role = Qt.EditRole
def __init__(self, combobox):
super(ComboDrag, self).__init__(combobox)
self._dragpos = None
combobox.installEventFilter(self)
def eventFilter(self, combobox, ev):
if ev.type() == QEvent.MouseButtonPress and ev.button() == Qt.LeftButton:
self._dragpos = ev.pos()
return not combobox.isEditable()
elif (ev.type() == QEvent.MouseMove and ev.buttons() & Qt.LeftButton
and combobox.count() >0):
return self.mouseMoved(combobox, ev.pos()) or False
elif (ev.type() == QEvent.MouseButtonRelease
and ev.button() == Qt.LeftButton and not combobox.isEditable()):
combobox.mousePressEvent(ev)
return False
def mouseMoved(self, combobox, pos):
if (self._dragpos is not None
and (pos - self._dragpos).manhattanLength()
>= QApplication.startDragDistance()):
self.startDrag(combobox)
return True
def startDrag(self, combobox):
index = combobox.model().index(combobox.currentIndex(), self.column)
filename = combobox.model().data(index, self.role)
icon = combobox.model().data(index, Qt.DecorationRole)
dragFile(combobox, filename, icon, Qt.CopyAction)
class Dragger(QObject):
"""Drags anything from any widget.
Use dragger.installEventFilter(widget) to have it drag.
"""
def __init__(self, parent=None):
super(Dragger, self).__init__(parent)
self._dragpos = None
if parent:
parent.installEventFilter(self)
def eventFilter(self, widget, ev):
if ev.type() == QEvent.MouseButtonPress and ev.button() == Qt.LeftButton:
self._dragpos = ev.pos()
return True
elif ev.type() == QEvent.MouseMove and ev.buttons() & Qt.LeftButton:
return self.mouseMoved(widget, ev.pos()) or False
return False
def mouseMoved(self, widget, pos):
if (self._dragpos is not None
and (pos - self._dragpos).manhattanLength()
>= QApplication.startDragDistance()):
self.startDrag(widget)
return True
def startDrag(self, widget):
"""Reimplement to start a drag."""
class FileDragger(Dragger):
def filename(self):
"""Should return the filename to drag."""
def startDrag(self, widget):
filename = self.filename()
if filename:
dragFile(widget, filename)
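# Hypothetical subclass sketch (not part of this module): drag the file path that a
# QLabel shows, using the label as the dragger's parent widget.
#   class LabelFileDragger(FileDragger):
#       def filename(self):
#           return self.parent().text()
#   dragger = LabelFileDragger(my_label)   # my_label is an assumed QLabel holding a path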
def dragFile(widget, filename, icon=None, dropactions=Qt.CopyAction):
"""Starts dragging the given local file from the widget."""
if icon is None or icon.isNull():
icon = QFileIconProvider().icon(QFileInfo(filename))
drag = QDrag(widget)
data = QMimeData()
data.setUrls([QUrl.fromLocalFile(filename)])
drag.setMimeData(data)
drag.setPixmap(icon.pixmap(32))
drag.exec_(dropactions)
| gpl-2.0 |
volkerha/DT211-3-Cloud | euler/e11.py | 1 | 2860 | grid = '08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48'
def calcGrid():
largest = 0
current = 0
#calc horizontal
for i in range(0, 20):
for j in range(0, 17):
current = (int(grid[(i*20+j)*3])*10 + int(grid[(i*20+j)*3+1])) * (int(grid[(i*20+1+j)*3])*10 + int(grid[(i*20+j+1)*3+1])) * (int(grid[(i*20+2+j)*3])*10 + int(grid[(i*20+j+2)*3+1])) * (int(grid[(i*20+3+j)*3])*10 + int(grid[(i*20+j+3)*3+1]))
if(current > largest):
largest = current
#calc vertical
for j in range(0, 20):
for i in range(0, 17):
current = (int(grid[(i*20+j)*3])*10 + int(grid[(i*20+j)*3+1])) * (int(grid[((i+1)*20+j)*3])*10 + int(grid[((i+1)*20+j)*3+1])) * (int(grid[((i+2)*20+j)*3])*10 + int(grid[((i+2)*20+j)*3+1])) * (int(grid[((i+3)*20+j)*3])*10 + int(grid[((i+3)*20+j)*3+1]))
if(current > largest):
largest = current
#calc vertical down to right
for j in range(0, 17):
for i in range(0, 17):
current = (int(grid[(i*20+j)*3])*10 + int(grid[(i*20+j)*3+1])) * (int(grid[((i+1)*20+j+1)*3])*10 + int(grid[((i+1)*20+j+1)*3+1])) * (int(grid[((i+2)*20+j+2)*3])*10 + int(grid[((i+2)*20+j+2)*3+1])) * (int(grid[((i+3)*20+j+3)*3])*10 + int(grid[((i+3)*20+j+3)*3+1]))
if(current > largest):
largest = current
#calc vertical down to left
for j in range(3, 20):
for i in range(0, 17):
current = (int(grid[(i*20+j)*3])*10 + int(grid[(i*20+j)*3+1])) * (int(grid[((i+1)*20+j-1)*3])*10 + int(grid[((i+1)*20+j-1)*3+1])) * (int(grid[((i+2)*20+j-2)*3])*10 + int(grid[((i+2)*20+j-2)*3+1])) * (int(grid[((i+3)*20+j-3)*3])*10 + int(grid[((i+3)*20+j-3)*3+1]))
if(current > largest):
largest = current
print(largest)
calcGrid() | mit |
milankl/swm | calc/misc/Re_hist_calc_bs_old.py | 1 | 4307 | ## HISTOGRAM COMPUTATIONS FOR REYNOLDS AND ROSSBY NUMBERS
from __future__ import print_function
# path
import os
path = '/home/mkloewer/python/swm/'
#path = os.path.dirname(os.getcwd()) + '/' # on level above
os.chdir(path) # change working directory
import time as tictoc
import numpy as np
from scipy import sparse
# OPTIONS
runfolder = [14]
print('Calculating Reynolds histograms from run ' + str(runfolder))
ReH = []
Re_mean = []
Re_median = []
## read data - calculate each run separately
for r,i in zip(runfolder,range(len(runfolder))):
runpath = path+'stoch/data/run%04i' % r
skip = 5*365
u = np.load(runpath+'/u_sub.npy')[skip:,...]
v = np.load(runpath+'/v_sub.npy')[skip:,...]
eta = np.load(runpath+'/h_sub.npy')[skip:,...]
e = np.load(runpath+'/e_sub.npy')[skip:,...]
t = np.load(runpath+'/t_sub.npy')[skip:,...]
print('run %i read.' % r)
## read param
global param
param = np.load(runpath+'/param.npy').all()
# import functions
exec(open(path+'swm_param.py').read())
exec(open(path+'swm_operators.py').read())
param['output'] = 0
set_grad_mat()
set_interp_mat()
set_coriolis()
tlen = len(t)
## create ouputfolder
try:
os.mkdir(runpath+'/analysis')
except:
pass
## reshape u,v
u = u.reshape((tlen,param['Nu'])).T
v = v.reshape((tlen,param['Nv'])).T
h = eta.reshape((tlen,param['NT'])).T + param['H']
e = e.reshape((tlen,param['NT'])).T
print('Reshape done.')
## COMPUTE REYNOLDS, ROSSBY
u_T = IuT.dot(u)
v_T = IvT.dot(v)
print('u,v interpolation done.')
#advective term
adv_u = u_T*Gux.dot(u) + v_T*IqT.dot(Guy.dot(u))
adv_v = u_T*IqT.dot(Gvx.dot(v)) + v_T*Gvy.dot(v)
del u_T,v_T
adv_term = np.sqrt(adv_u**2 + adv_v**2)
del adv_u, adv_v
print('Advection term done.')
#diffusive term
S = (Gux.dot(u)-Gvy.dot(v),G2vx.dot(v) + G2uy.dot(u))
del u,v
hS = (h*S[0],ITq.dot(h)*S[1])
del S
print('Stress tensor S done.')
diff_u = (GTx.dot(hS[0]) + Gqy.dot(hS[1])) / ITu.dot(h)
diff_v = (Gqx.dot(hS[1]) - GTy.dot(hS[0])) / ITv.dot(h)
print('Harmonic part done.')
# biharmonic stress tensor R = (R11, R12, R12, -R11), store only R11, R12
R = (Gux.dot(diff_u) - Gvy.dot(diff_v), G2vx.dot(diff_v) + G2uy.dot(diff_u))
del diff_u, diff_v
nuhR = (param['nu_B']*h*R[0],param['nu_B']*ITq.dot(h)*R[1])
del R
print('Stress tensor R done.')
bidiff_u = (GTx.dot(nuhR[0]) + Gqy.dot(nuhR[1])) / ITu.dot(h)
bidiff_v = (Gqx.dot(nuhR[1]) - GTy.dot(nuhR[0])) / ITv.dot(h)
del nuhR
print('Biharmonic part done.')
# backscatter
nu_back = -param['c_back']*param['max_dxdy']*np.sqrt(2*e.clip(0,e.max()))
del e
nu_back_hS0 = nu_back*hS[0]
nu_back_hS1 = ITq.dot(nu_back)*hS[1]
print('nu_back calculated.')
del nu_back
back_diff_u = (GTx.dot(nu_back_hS0) + Gqy.dot(nu_back_hS1)) / ITu.dot(h)
back_diff_v = (Gqx.dot(nu_back_hS1) - GTy.dot(nu_back_hS0)) / ITv.dot(h)
del nu_back_hS0,nu_back_hS1
diff_term = np.sqrt(IuT.dot((bidiff_u + back_diff_u)**2) + IvT.dot((bidiff_v + back_diff_v)**2))
#diff_term = np.sqrt(IuT.dot(bidiff_u**2) + IvT.dot(bidiff_v**2))
print('Diff term done.')
del bidiff_u,bidiff_v,back_diff_u,back_diff_v
# actual number
Re = (adv_term / diff_term).flatten()
print('Re computed.')
del adv_term, diff_term
Re_mean.append(Re.mean())
Re_median.append(np.median(Re))
Re = np.log10(Re)
# histogram
Re_min = -3. # in log scale
Re_max = 5.
N = 300
ReH_temp,Re_edges = np.histogram(Re,np.linspace(Re_min,Re_max,N))
print('Re histogram done.')
del Re
# store each run in a list
ReH.append(ReH_temp)
Re_mid = Re_edges[:-1] + np.diff(Re_edges)/2.
ReH = np.array(ReH).sum(axis=0)
Re_mean = np.array(Re_mean).mean()
Re_median = np.median(np.array(Re_median)) #actually median of medians though...
## STORING in last
dic = dict()
all_var2export = ['ReH','Re_mid','Re_edges','Re_mean','Re_median']
for vars in all_var2export:
exec('dic[vars] ='+vars)
np.save(runpath+'/analysis/Re_hist.npy',dic)
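# Illustrative sketch (assumption, not part of the original script): the stored
# dictionary can be read back for plotting with the same np.load(...).all() pattern
# used for param.npy above.
hist = np.load(runpath+'/analysis/Re_hist.npy').all()
print('Re mean = %.2f, median = %.2f' % (hist['Re_mean'], hist['Re_median']))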
| gpl-3.0 |
rodrigolucianocosta/ControleEstoque | rOne/Storage101/django-localflavor/django-localflavor-1.3/tests/test_pl.py | 4 | 22841 | from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.pl.forms import (PLCountySelect, PLNationalIDCardNumberField, PLNIPField, PLPESELField,
PLPostalCodeField, PLProvinceSelect, PLREGONField)
class PLLocalFlavorTests(SimpleTestCase):
def test_PLProvinceSelect(self):
f = PLProvinceSelect()
out = '''<select name="voivodeships">
<option value="lower_silesia">Lower Silesian</option>
<option value="kuyavia-pomerania">Kuyavian-Pomeranian</option>
<option value="lublin">Lublin</option>
<option value="lubusz">Lubusz</option>
<option value="lodz">Lodz</option>
<option value="lesser_poland">Lesser Poland</option>
<option value="masovia">Masovian</option>
<option value="opole">Opole</option>
<option value="subcarpatia">Subcarpathian</option>
<option value="podlasie">Podlasie</option>
<option value="pomerania" selected="selected">Pomeranian</option>
<option value="silesia">Silesian</option>
<option value="swietokrzyskie">Swietokrzyskie</option>
<option value="warmia-masuria">Warmian-Masurian</option>
<option value="greater_poland">Greater Poland</option>
<option value="west_pomerania">West Pomeranian</option>
</select>'''
self.assertHTMLEqual(f.render('voivodeships', 'pomerania'), out)
def test_PLCountrySelect(self):
f = PLCountySelect()
out = '''<select name="administrativeunit">
<option value="wroclaw">Wroc\u0142aw</option>
<option value="jeleniagora">Jelenia G\xf3ra</option>
<option value="legnica">Legnica</option>
<option value="boleslawiecki">boles\u0142awiecki</option>
<option value="dzierzoniowski">dzier\u017coniowski</option>
<option value="glogowski">g\u0142ogowski</option>
<option value="gorowski">g\xf3rowski</option>
<option value="jaworski">jaworski</option>
<option value="jeleniogorski">jeleniog\xf3rski</option>
<option value="kamiennogorski">kamiennog\xf3rski</option>
<option value="klodzki">k\u0142odzki</option>
<option value="legnicki">legnicki</option>
<option value="lubanski">luba\u0144ski</option>
<option value="lubinski">lubi\u0144ski</option>
<option value="lwowecki">lw\xf3wecki</option>
<option value="milicki">milicki</option>
<option value="olesnicki">ole\u015bnicki</option>
<option value="olawski">o\u0142awski</option>
<option value="polkowicki">polkowicki</option>
<option value="strzelinski">strzeli\u0144ski</option>
<option value="sredzki">\u015bredzki</option>
<option value="swidnicki">\u015bwidnicki</option>
<option value="trzebnicki">trzebnicki</option>
<option value="walbrzyski">wa\u0142brzyski</option>
<option value="wolowski">wo\u0142owski</option>
<option value="wroclawski">wroc\u0142awski</option>
<option value="zabkowicki">z\u0105bkowicki</option>
<option value="zgorzelecki">zgorzelecki</option>
<option value="zlotoryjski">z\u0142otoryjski</option>
<option value="bydgoszcz">Bydgoszcz</option>
<option value="torun">Toru\u0144</option>
<option value="wloclawek">W\u0142oc\u0142awek</option>
<option value="grudziadz">Grudzi\u0105dz</option>
<option value="aleksandrowski">aleksandrowski</option>
<option value="brodnicki">brodnicki</option>
<option value="bydgoski">bydgoski</option>
<option value="chelminski">che\u0142mi\u0144ski</option>
<option value="golubsko-dobrzynski">golubsko-dobrzy\u0144ski</option>
<option value="grudziadzki">grudzi\u0105dzki</option>
<option value="inowroclawski">inowroc\u0142awski</option>
<option value="lipnowski">lipnowski</option>
<option value="mogilenski">mogile\u0144ski</option>
<option value="nakielski">nakielski</option>
<option value="radziejowski">radziejowski</option>
<option value="rypinski">rypi\u0144ski</option>
<option value="sepolenski">s\u0119pole\u0144ski</option>
<option value="swiecki">\u015bwiecki</option>
<option value="torunski">toru\u0144ski</option>
<option value="tucholski">tucholski</option>
<option value="wabrzeski">w\u0105brzeski</option>
<option value="wloclawski">w\u0142oc\u0142awski</option>
<option value="zninski">\u017cni\u0144ski</option>
<option value="lublin">Lublin</option>
<option value="biala-podlaska">Bia\u0142a Podlaska</option>
<option value="chelm">Che\u0142m</option>
<option value="zamosc">Zamo\u015b\u0107</option>
<option value="bialski">bialski</option>
<option value="bilgorajski">bi\u0142gorajski</option>
<option value="chelmski">che\u0142mski</option>
<option value="hrubieszowski">hrubieszowski</option>
<option value="janowski">janowski</option>
<option value="krasnostawski">krasnostawski</option>
<option value="krasnicki">kra\u015bnicki</option>
<option value="lubartowski">lubartowski</option>
<option value="lubelski">lubelski</option>
<option value="leczynski">\u0142\u0119czy\u0144ski</option>
<option value="lukowski">\u0142ukowski</option>
<option value="opolski">opolski</option>
<option value="parczewski">parczewski</option>
<option value="pulawski">pu\u0142awski</option>
<option value="radzynski">radzy\u0144ski</option>
<option value="rycki">rycki</option>
<option value="swidnicki">\u015bwidnicki</option>
<option value="tomaszowski">tomaszowski</option>
<option value="wlodawski">w\u0142odawski</option>
<option value="zamojski">zamojski</option>
<option value="gorzow-wielkopolski">Gorz\xf3w Wielkopolski</option>
<option value="zielona-gora">Zielona G\xf3ra</option>
<option value="gorzowski">gorzowski</option>
<option value="krosnienski">kro\u015bnie\u0144ski</option>
<option value="miedzyrzecki">mi\u0119dzyrzecki</option>
<option value="nowosolski">nowosolski</option>
<option value="slubicki">s\u0142ubicki</option>
<option value="strzelecko-drezdenecki">strzelecko-drezdenecki</option>
<option value="sulecinski">sule\u0144ci\u0144ski</option>
<option value="swiebodzinski">\u015bwiebodzi\u0144ski</option>
<option value="wschowski">wschowski</option>
<option value="zielonogorski">zielonog\xf3rski</option>
<option value="zaganski">\u017caga\u0144ski</option>
<option value="zarski">\u017carski</option>
<option value="lodz">\u0141\xf3d\u017a</option>
<option value="piotrkow-trybunalski">Piotrk\xf3w Trybunalski</option>
<option value="skierniewice">Skierniewice</option>
<option value="belchatowski">be\u0142chatowski</option>
<option value="brzezinski">brzezi\u0144ski</option>
<option value="kutnowski">kutnowski</option>
<option value="laski">\u0142aski</option>
<option value="leczycki">\u0142\u0119czycki</option>
<option value="lowicki">\u0142owicki</option>
<option value="lodzki wschodni">\u0142\xf3dzki wschodni</option>
<option value="opoczynski">opoczy\u0144ski</option>
<option value="pabianicki">pabianicki</option>
<option value="pajeczanski">paj\u0119cza\u0144ski</option>
<option value="piotrkowski">piotrkowski</option>
<option value="poddebicki">podd\u0119bicki</option>
<option value="radomszczanski">radomszcza\u0144ski</option>
<option value="rawski">rawski</option>
<option value="sieradzki">sieradzki</option>
<option value="skierniewicki">skierniewicki</option>
<option value="tomaszowski">tomaszowski</option>
<option value="wielunski">wielu\u0144ski</option>
<option value="wieruszowski">wieruszowski</option>
<option value="zdunskowolski">zdu\u0144skowolski</option>
<option value="zgierski">zgierski</option>
<option value="krakow">Krak\xf3w</option>
<option value="tarnow">Tarn\xf3w</option>
<option value="nowy-sacz">Nowy S\u0105cz</option>
<option value="bochenski">boche\u0144ski</option>
<option value="brzeski">brzeski</option>
<option value="chrzanowski">chrzanowski</option>
<option value="dabrowski">d\u0105browski</option>
<option value="gorlicki">gorlicki</option>
<option value="krakowski">krakowski</option>
<option value="limanowski">limanowski</option>
<option value="miechowski">miechowski</option>
<option value="myslenicki">my\u015blenicki</option>
<option value="nowosadecki">nowos\u0105decki</option>
<option value="nowotarski">nowotarski</option>
<option value="olkuski">olkuski</option>
<option value="oswiecimski">o\u015bwi\u0119cimski</option>
<option value="proszowicki">proszowicki</option>
<option value="suski">suski</option>
<option value="tarnowski">tarnowski</option>
<option value="tatrzanski">tatrza\u0144ski</option>
<option value="wadowicki">wadowicki</option>
<option value="wielicki">wielicki</option>
<option value="warszawa">Warszawa</option>
<option value="ostroleka">Ostro\u0142\u0119ka</option>
<option value="plock">P\u0142ock</option>
<option value="radom">Radom</option>
<option value="siedlce">Siedlce</option>
<option value="bialobrzeski">bia\u0142obrzeski</option>
<option value="ciechanowski">ciechanowski</option>
<option value="garwolinski">garwoli\u0144ski</option>
<option value="gostyninski">gostyni\u0144ski</option>
<option value="grodziski">grodziski</option>
<option value="grojecki">gr\xf3jecki</option>
<option value="kozienicki">kozenicki</option>
<option value="legionowski">legionowski</option>
<option value="lipski">lipski</option>
<option value="losicki">\u0142osicki</option>
<option value="makowski">makowski</option>
<option value="minski">mi\u0144ski</option>
<option value="mlawski">m\u0142awski</option>
<option value="nowodworski">nowodworski</option>
<option value="ostrolecki">ostro\u0142\u0119cki</option>
<option value="ostrowski">ostrowski</option>
<option value="otwocki">otwocki</option>
<option value="piaseczynski">piaseczy\u0144ski</option>
<option value="plocki">p\u0142ocki</option>
<option value="plonski">p\u0142o\u0144ski</option>
<option value="pruszkowski">pruszkowski</option>
<option value="przasnyski">przasnyski</option>
<option value="przysuski">przysuski</option>
<option value="pultuski">pu\u0142tuski</option>
<option value="radomski">radomski</option>
<option value="siedlecki">siedlecki</option>
<option value="sierpecki">sierpecki</option>
<option value="sochaczewski">sochaczewski</option>
<option value="sokolowski">soko\u0142owski</option>
<option value="szydlowiecki">szyd\u0142owiecki</option>
<option value="warszawski-zachodni">warszawski zachodni</option>
<option value="wegrowski">w\u0119growski</option>
<option value="wolominski">wo\u0142omi\u0144ski</option>
<option value="wyszkowski">wyszkowski</option>
<option value="zwolenski">zwole\u0144ski</option>
<option value="zurominski">\u017curomi\u0144ski</option>
<option value="zyrardowski">\u017cyrardowski</option>
<option value="opole">Opole</option>
<option value="brzeski">brzeski</option>
<option value="glubczycki">g\u0142ubczyski</option>
<option value="kedzierzynsko-kozielski">k\u0119dzierzy\u0144sko-kozielski</option>
<option value="kluczborski">kluczborski</option>
<option value="krapkowicki">krapkowicki</option>
<option value="namyslowski">namys\u0142owski</option>
<option value="nyski">nyski</option>
<option value="oleski">oleski</option>
<option value="opolski">opolski</option>
<option value="prudnicki">prudnicki</option>
<option value="strzelecki">strzelecki</option>
<option value="rzeszow">Rzesz\xf3w</option>
<option value="krosno">Krosno</option>
<option value="przemysl">Przemy\u015bl</option>
<option value="tarnobrzeg">Tarnobrzeg</option>
<option value="bieszczadzki">bieszczadzki</option>
<option value="brzozowski">brzozowski</option>
<option value="debicki">d\u0119bicki</option>
<option value="jaroslawski">jaros\u0142awski</option>
<option value="jasielski">jasielski</option>
<option value="kolbuszowski">kolbuszowski</option>
<option value="krosnienski">kro\u015bnie\u0144ski</option>
<option value="leski">leski</option>
<option value="lezajski">le\u017cajski</option>
<option value="lubaczowski">lubaczowski</option>
<option value="lancucki">\u0142a\u0144cucki</option>
<option value="mielecki">mielecki</option>
<option value="nizanski">ni\u017ca\u0144ski</option>
<option value="przemyski">przemyski</option>
<option value="przeworski">przeworski</option>
<option value="ropczycko-sedziszowski">ropczycko-s\u0119dziszowski</option>
<option value="rzeszowski">rzeszowski</option>
<option value="sanocki">sanocki</option>
<option value="stalowowolski">stalowowolski</option>
<option value="strzyzowski">strzy\u017cowski</option>
<option value="tarnobrzeski">tarnobrzeski</option>
<option value="bialystok">Bia\u0142ystok</option>
<option value="lomza">\u0141om\u017ca</option>
<option value="suwalki">Suwa\u0142ki</option>
<option value="augustowski">augustowski</option>
<option value="bialostocki">bia\u0142ostocki</option>
<option value="bielski">bielski</option>
<option value="grajewski">grajewski</option>
<option value="hajnowski">hajnowski</option>
<option value="kolnenski">kolne\u0144ski</option>
<option value="\u0142omzynski">\u0142om\u017cy\u0144ski</option>
<option value="moniecki">moniecki</option>
<option value="sejnenski">sejne\u0144ski</option>
<option value="siemiatycki">siematycki</option>
<option value="sokolski">sok\xf3lski</option>
<option value="suwalski">suwalski</option>
<option value="wysokomazowiecki">wysokomazowiecki</option>
<option value="zambrowski">zambrowski</option>
<option value="gdansk">Gda\u0144sk</option>
<option value="gdynia">Gdynia</option>
<option value="slupsk">S\u0142upsk</option>
<option value="sopot">Sopot</option>
<option value="bytowski">bytowski</option>
<option value="chojnicki">chojnicki</option>
<option value="czluchowski">cz\u0142uchowski</option>
<option value="kartuski">kartuski</option>
<option value="koscierski">ko\u015bcierski</option>
<option value="kwidzynski">kwidzy\u0144ski</option>
<option value="leborski">l\u0119borski</option>
<option value="malborski">malborski</option>
<option value="nowodworski">nowodworski</option>
<option value="gdanski">gda\u0144ski</option>
<option value="pucki">pucki</option>
<option value="slupski">s\u0142upski</option>
<option value="starogardzki">starogardzki</option>
<option value="sztumski">sztumski</option>
<option value="tczewski">tczewski</option>
<option value="wejherowski">wejcherowski</option>
<option value="katowice" selected="selected">Katowice</option>
<option value="bielsko-biala">Bielsko-Bia\u0142a</option>
<option value="bytom">Bytom</option>
<option value="chorzow">Chorz\xf3w</option>
<option value="czestochowa">Cz\u0119stochowa</option>
<option value="dabrowa-gornicza">D\u0105browa G\xf3rnicza</option>
<option value="gliwice">Gliwice</option>
<option value="jastrzebie-zdroj">Jastrz\u0119bie Zdr\xf3j</option>
<option value="jaworzno">Jaworzno</option>
<option value="myslowice">Mys\u0142owice</option>
<option value="piekary-slaskie">Piekary \u015al\u0105skie</option>
<option value="ruda-slaska">Ruda \u015al\u0105ska</option>
<option value="rybnik">Rybnik</option>
<option value="siemianowice-slaskie">Siemianowice \u015al\u0105skie</option>
<option value="sosnowiec">Sosnowiec</option>
<option value="swietochlowice">\u015awi\u0119toch\u0142owice</option>
<option value="tychy">Tychy</option>
<option value="zabrze">Zabrze</option>
<option value="zory">\u017bory</option>
<option value="bedzinski">b\u0119dzi\u0144ski</option>
<option value="bielski">bielski</option>
<option value="bierunsko-ledzinski">bieru\u0144sko-l\u0119dzi\u0144ski</option>
<option value="cieszynski">cieszy\u0144ski</option>
<option value="czestochowski">cz\u0119stochowski</option>
<option value="gliwicki">gliwicki</option>
<option value="klobucki">k\u0142obucki</option>
<option value="lubliniecki">lubliniecki</option>
<option value="mikolowski">miko\u0142owski</option>
<option value="myszkowski">myszkowski</option>
<option value="pszczynski">pszczy\u0144ski</option>
<option value="raciborski">raciborski</option>
<option value="rybnicki">rybnicki</option>
<option value="tarnogorski">tarnog\xf3rski</option>
<option value="wodzislawski">wodzis\u0142awski</option>
<option value="zawiercianski">zawiercia\u0144ski</option>
<option value="zywiecki">\u017cywiecki</option>
<option value="kielce">Kielce</option>
<option value="buski">buski</option>
<option value="jedrzejowski">j\u0119drzejowski</option>
<option value="kazimierski">kazimierski</option>
<option value="kielecki">kielecki</option>
<option value="konecki">konecki</option>
<option value="opatowski">opatowski</option>
<option value="ostrowiecki">ostrowiecki</option>
<option value="pinczowski">pi\u0144czowski</option>
<option value="sandomierski">sandomierski</option>
<option value="skarzyski">skar\u017cyski</option>
<option value="starachowicki">starachowicki</option>
<option value="staszowski">staszowski</option>
<option value="wloszczowski">w\u0142oszczowski</option>
<option value="olsztyn">Olsztyn</option>
<option value="elblag">Elbl\u0105g</option>
<option value="bartoszycki">bartoszycki</option>
<option value="braniewski">braniewski</option>
<option value="dzialdowski">dzia\u0142dowski</option>
<option value="elblaski">elbl\u0105ski</option>
<option value="elcki">e\u0142cki</option>
<option value="gizycki">gi\u017cycki</option>
<option value="goldapski">go\u0142dapski</option>
<option value="ilawski">i\u0142awski</option>
<option value="ketrzynski">k\u0119trzy\u0144ski</option>
<option value="lidzbarski">lidzbarski</option>
<option value="mragowski">mr\u0105gowski</option>
<option value="nidzicki">nidzicki</option>
<option value="nowomiejski">nowomiejski</option>
<option value="olecki">olecki</option>
<option value="olsztynski">olszty\u0144ski</option>
<option value="ostrodzki">ostr\xf3dzki</option>
<option value="piski">piski</option>
<option value="szczycienski">szczycie\u0144ski</option>
<option value="wegorzewski">w\u0119gorzewski</option>
<option value="poznan">Pozna\u0144</option>
<option value="kalisz">Kalisz</option>
<option value="konin">Konin</option>
<option value="leszno">Leszno</option>
<option value="chodzieski">chodziejski</option>
<option value="czarnkowsko-trzcianecki">czarnkowsko-trzcianecki</option>
<option value="gnieznienski">gnie\u017anie\u0144ski</option>
<option value="gostynski">gosty\u0144ski</option>
<option value="grodziski">grodziski</option>
<option value="jarocinski">jaroci\u0144ski</option>
<option value="kaliski">kaliski</option>
<option value="kepinski">k\u0119pi\u0144ski</option>
<option value="kolski">kolski</option>
<option value="koninski">koni\u0144ski</option>
<option value="koscianski">ko\u015bcia\u0144ski</option>
<option value="krotoszynski">krotoszy\u0144ski</option>
<option value="leszczynski">leszczy\u0144ski</option>
<option value="miedzychodzki">mi\u0119dzychodzki</option>
<option value="nowotomyski">nowotomyski</option>
<option value="obornicki">obornicki</option>
<option value="ostrowski">ostrowski</option>
<option value="ostrzeszowski">ostrzeszowski</option>
<option value="pilski">pilski</option>
<option value="pleszewski">pleszewski</option>
<option value="poznanski">pozna\u0144ski</option>
<option value="rawicki">rawicki</option>
<option value="slupecki">s\u0142upecki</option>
<option value="szamotulski">szamotulski</option>
<option value="sredzki">\u015bredzki</option>
<option value="sremski">\u015bremski</option>
<option value="turecki">turecki</option>
<option value="wagrowiecki">w\u0105growiecki</option>
<option value="wolsztynski">wolszty\u0144ski</option>
<option value="wrzesinski">wrzesi\u0144ski</option>
<option value="zlotowski">z\u0142otowski</option>
<option value="bialogardzki">bia\u0142ogardzki</option>
<option value="choszczenski">choszcze\u0144ski</option>
<option value="drawski">drawski</option>
<option value="goleniowski">goleniowski</option>
<option value="gryficki">gryficki</option>
<option value="gryfinski">gryfi\u0144ski</option>
<option value="kamienski">kamie\u0144ski</option>
<option value="kolobrzeski">ko\u0142obrzeski</option>
<option value="koszalinski">koszali\u0144ski</option>
<option value="lobeski">\u0142obeski</option>
<option value="mysliborski">my\u015bliborski</option>
<option value="policki">policki</option>
<option value="pyrzycki">pyrzycki</option>
<option value="slawienski">s\u0142awie\u0144ski</option>
<option value="stargardzki">stargardzki</option>
<option value="szczecinecki">szczecinecki</option>
<option value="swidwinski">\u015bwidwi\u0144ski</option>
<option value="walecki">wa\u0142ecki</option>
</select>'''
self.assertHTMLEqual(f.render('administrativeunit', 'katowice'), out)
def test_PLPostalCodeField(self):
error_format = ['Enter a postal code in the format XX-XXX.']
valid = {
'41-403': '41-403',
}
invalid = {
'43--434': error_format,
}
self.assertFieldOutput(PLPostalCodeField, valid, invalid)
def test_PLNIPField(self):
error_format = ['Enter a tax number field (NIP) in the format XXX-XXX-XX-XX, XXX-XX-XX-XXX or XXXXXXXXXX.']
error_checksum = ['Wrong checksum for the Tax Number (NIP).']
valid = {
'646-241-41-24': '6462414124',
'646-24-14-124': '6462414124',
'6462414124': '6462414124',
}
invalid = {
'43-343-234-323': error_format,
'64-62-414-124': error_format,
'646-241-41-23': error_checksum,
}
self.assertFieldOutput(PLNIPField, valid, invalid)
def test_PLPESELField(self):
error_checksum = ['Wrong checksum for the National Identification Number.']
error_format = ['National Identification Number consists of 11 digits.']
valid = {
'80071610614': '80071610614',
}
invalid = {
'80071610610': error_checksum,
'80': error_format,
'800716106AA': error_format,
}
self.assertFieldOutput(PLPESELField, valid, invalid)
def test_PLNationalIDCardNumberField(self):
error_checksum = ['Wrong checksum for the National ID Card Number.']
error_format = ['National ID Card Number consists of 3 letters and 6 digits.']
valid = {
'ABC123458': 'ABC123458',
'abc123458': 'ABC123458',
}
invalid = {
'ABC123457': error_checksum,
'abc123457': error_checksum,
'a12Aaaaaa': error_format,
'AA1234443': error_format,
}
self.assertFieldOutput(PLNationalIDCardNumberField, valid, invalid)
def test_PLREGONField(self):
error_checksum = ['Wrong checksum for the National Business Register Number (REGON).']
error_format = ['National Business Register Number (REGON) consists of 9 or 14 digits.']
valid = {
'12345678512347': '12345678512347',
'590096454': '590096454',
# A special case where the checksum == 10 and the control
# digit == '0'
'391023200': '391023200',
}
invalid = {
'123456784': error_checksum,
'12345678412342': error_checksum,
'590096453': error_checksum,
# A special case where the checksum == 10,
# but the control digit != '0'
'111111111': error_checksum,
'590096': error_format,
}
self.assertFieldOutput(PLREGONField, valid, invalid)
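# Illustrative sketches (assumptions, not part of the test suite): the checksums
# exercised above. A 9-digit REGON check digit is a weighted sum mod 11 with a
# result of 10 mapped to 0; a PESEL check digit is (10 - weighted_sum % 10) % 10.
def regon9_check_digit(first_eight):
    weights = (8, 9, 2, 3, 4, 5, 6, 7)
    total = sum(w * int(d) for w, d in zip(weights, first_eight))
    return str(total % 11 % 10)
# regon9_check_digit('59009645') == '4' (valid '590096454' above);
# regon9_check_digit('39102320') == '0' (the checksum == 10 special case above).

def pesel_check_digit(first_ten):
    weights = (1, 3, 7, 9, 1, 3, 7, 9, 1, 3)
    total = sum(w * int(d) for w, d in zip(weights, first_ten))
    return str((10 - total % 10) % 10)
# pesel_check_digit('8007161061') == '4' (valid '80071610614' above).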
| gpl-3.0 |
Poofjunior/dxf2gcode | postpro/tspoptimisation.py | 1 | 16557 | # -*- coding: utf-8 -*-
############################################################################
#
# Copyright (C) 2008-2015
# Christian Kohlöffel
# Vinzenz Schulz
# Jean-Paul Schouwstra
#
# This file is part of DXF2GCODE.
#
# DXF2GCODE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DXF2GCODE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DXF2GCODE. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
from __future__ import absolute_import
from __future__ import division
from random import random, shuffle
from math import floor, ceil
import globals.globals as g
from globals.six import text_type
import globals.constants as c
if c.PYQT5notPYQT4:
from PyQt5 import QtCore
else:
from PyQt4 import QtCore
import logging
logger = logging.getLogger("PostPro.TSP")
class TspOptimization(object):
"""
    Optimization using the Travelling Salesman Problem (TSP) algorithm
"""
def __init__(self, st_end_points, order):
self.shape_nrs = len(st_end_points)
self.iterations = int(self.shape_nrs) * 10
self.pop_nr = min(int(ceil(self.shape_nrs / 8.0) * 8.0),
g.config.vars.Route_Optimisation['max_population'])
self.mutate_rate = g.config.vars.Route_Optimisation['mutation_rate']
self.opt_route = []
self.order = order
self.st_end_points = st_end_points
# Generate the Distance Matrix
self.DistanceMatrix = DistanceMatrixClass()
self.DistanceMatrix.generate_matrix(st_end_points)
# Generation Population
self.Population = PopulationClass([self.shape_nrs, self.pop_nr],
self.DistanceMatrix.matrix,
self.mutate_rate)
# Initialise the Result Class
self.Fittness = FittnessClass(self.Population,
list(range(self.Population.size[1])),
self.order)
self.Fittness.calc_st_fittness(self.DistanceMatrix.matrix,
range(self.shape_nrs))
        # Always place the start of the sequence at the last point (the fixed start point)
self.Fittness.set_startpoint()
# Function to correct the order of the elements
self.Fittness.correct_constrain_order()
# logger.debug('Calculation of start fitness TSP: %s' %self)
# logger.debug('Size Distance matrix: %s', len(self.DistanceMatrix.matrix))
        # Create the first results
self.Fittness.calc_cur_fittness(self.DistanceMatrix.matrix)
self.Fittness.select_best_fittness()
self.opt_route = self.Population.pop[self.Fittness.best_route]
        # Create the 2-opt optimization class (currently disabled)
# self.optmove=ClassOptMove(dmatrix=self.DistanceMatrix.matrix, nei_nr=int(round(self.shape_nrs/10)))
def calc_next_iteration(self):
"""
calc_next_iteration()
"""
        # Run the genetic algorithm
self.Population.genetic_algorithm(self.Fittness, self.mutate_rate)
        # Optimise each tour with the 2-opt method (currently disabled)
# for pop_nr in range(len(self.Population.pop)):
# #print ("Vorher: %0.2f" %self.calc_tour_length(tours[tour_nr]))
# self.Population.pop[pop_nr]=self.optmove.do2optmove(self.Population.pop[pop_nr])
# #print ("Nachher: %0.2f" %self.calc_tour_length(tours[tour_nr]))
        # Always place the start of the sequence at the last point (the fixed start point)
self.Fittness.set_startpoint()
        # Correction function to fix the order of the elements
self.Fittness.correct_constrain_order()
        # Calculate the fitness of each route
self.Fittness.calc_cur_fittness(self.DistanceMatrix.matrix)
        # Penalty function in case the route does not match the required order
# Best route to choose
self.Fittness.select_best_fittness()
self.opt_route = self.Population.pop[self.Fittness.best_route]
# logger.debug('Calculation next iteration of TSP: %s' %self)
def __str__(self):
#res = self.Population.pop
return "Iteration nrs: %i" % (self.iterations * 10) +\
"\nShape nrs: %i" % self.shape_nrs +\
"\nPopulation: %i" % self.pop_nr +\
"\nMutate rate: %0.2f" % self.mutate_rate +\
"\norder: %s" % self.order +\
"\nStart length: %0.1f" % self.Fittness.best_fittness[0] +\
"\nOpt. length: %0.1f" % self.Fittness.best_fittness[-1] +\
"\nOpt. route: %s" % self.opt_route
class PopulationClass:
def __init__(self, size, dmatrix, mutate_rate):
self.size = size
self.mutate_rate = mutate_rate
self.pop = []
self.rot = []
# logger.debug('The Population size is: %s' %self.size)
for pop_nr in range(self.size[1]):
# logger.debug("======= TSP initializing population nr %i =======" % pop_nr)
if g.config.vars.Route_Optimisation['begin_art'] == 'ordered':
self.pop.append(list(range(size[0])))
elif g.config.vars.Route_Optimisation['begin_art'] == 'random':
self.pop.append(self.random_begin(size[0]))
elif g.config.vars.Route_Optimisation['begin_art'] == 'heuristic':
self.pop.append(self.heuristic_begin(dmatrix[:]))
else:
logger.error(self.tr('Wrong begin art of TSP chosen'))
for rot_nr in range(size[0]):
self.rot.append(0)
def __str__(self):
string = "\nPopulation size: %i X %i \nMutate rate: %0.2f \nRotation Matrix:\n%s \nPop Matrix:"\
% (self.size[0], self.size[1], self.mutate_rate, self.rot)
for line in self.pop:
string += '\n' + str(line)
return string
def tr(self, string_to_translate):
"""
Translate a string using the QCoreApplication translation framework
@param: string_to_translate: a unicode string
@return: the translated unicode string if it was possible to translate
"""
return text_type(QtCore.QCoreApplication.translate("PopulationClass",
string_to_translate))
def random_begin(self, size):
"""
random_begin for TSP
"""
tour = list(range(size))
shuffle(tour)
return tour
def heuristic_begin(self, dmatrix):
"""
heuristic_begin for TSP
"""
tour = []
possibilities = list(range(len(dmatrix[0])))
start_nr = int(floor(random()*len(dmatrix[0])))
        # Add the number to the tour and remove it from the remaining possibilities
tour.append(start_nr)
possibilities.pop(possibilities.index(tour[-1]))
counter = 0
while len(possibilities):
counter += 1
tour.append(self.heuristic_find_next(tour[-1], possibilities, dmatrix))
possibilities.pop(possibilities.index(tour[-1]))
# if counter % 10 == 0:
# logger.debug("TSP heuristic searching nr %i" % counter)
return tour
def heuristic_find_next(self, start, possibilities, dmatrix):
"""
heuristic_find_next() for TSP
"""
        # Pick the nearest remaining point by distance
min_dist = 1e99
darray = dmatrix[start]
for pnr in possibilities:
if darray[pnr] < min_dist:
min_point = pnr
min_dist = darray[pnr]
return min_point
def genetic_algorithm(self, Result, mutate_rate):
"""
genetic_algorithm for TSP
"""
self.mutate_rate = mutate_rate
# Neue Population Matrix erstellen
# Create new Population Matrix
new_pop = []
for p_nr in range(self.size[1]):
new_pop.append([])
# Tournament Selection 1 between Parents (2 Parents remaining)
ts_r1 = list(range(self.size[1]))
shuffle(ts_r1)
winners_r1 = []
tmp_fittness = []
for nr in range(self.size[1] // 2):
if Result.cur_fittness[ts_r1[nr * 2]] < Result.cur_fittness[ts_r1[(nr * 2) + 1]]:
winners_r1.append(self.pop[ts_r1[nr * 2]])
tmp_fittness.append(Result.cur_fittness[ts_r1[nr * 2]])
else:
winners_r1.append(self.pop[ts_r1[(nr * 2) + 1]])
tmp_fittness.append(Result.cur_fittness[ts_r1[(nr * 2) + 1]])
# print(tmp_fittness)
# Tournament Selection 2 only one Parent remaining
ts_r2 = list(range(self.size[1] // 2))
shuffle(ts_r2)
for nr in range(self.size[1] // 4):
if tmp_fittness[ts_r2[nr * 2]] < tmp_fittness[ts_r2[(nr * 2) + 1]]:
winner = winners_r1[ts_r2[nr * 2]]
else:
winner = winners_r1[ts_r2[(nr * 2) + 1]]
            # Write the winner into the new population matrix
# print(winner)
for pnr in range(2):
new_pop[pnr * self.size[1] // 2 + nr] = winner[:]
# Crossover Gens from 2 Parents
crossover = list(range(self.size[1] // 2))
shuffle(crossover)
for nr in range(self.size[1] // 4):
# child = parent2
# Parents are the winners of the first round (Genetic Selection?)
parent1 = winners_r1[crossover[nr * 2]][:]
child = winners_r1[crossover[(nr * 2) + 1]][:]
            # The gene segment from parent1 that is exchanged into the child
indx = [int(floor(random()*self.size[0])), int(floor(random()*self.size[0]))]
indx.sort()
while indx[0] == indx[1]:
indx = [int(floor(random()*self.size[0])), int(floor(random()*self.size[0]))]
indx.sort()
gens = parent1[indx[0]:indx[1] + 1]
# Remove the exchanged genes
for gen in gens:
child.pop(child.index(gen))
# Insert the new genes at a random position
ins_indx = int(floor(random()*self.size[0]))
new_children = child[0:ins_indx] + gens + child[ins_indx:len(child)]
# Write the new children in the new population matrix
for pnr in range(2):
new_pop[int((pnr + 0.5) * self.size[1] / 2 + nr)] = new_children[:]
# Mutate the 2nd half of the population matrix
mutate = list(range(self.size[1] // 2))
shuffle(mutate)
num_mutations = int(round(mutate_rate * self.size[1] / 2))
for nr in range(num_mutations):
            # Pick two distinct random indices bounding the segment to mutate
indx = [int(floor(random()*self.size[0])), int(floor(random()*self.size[0]))]
indx.sort()
while indx[0] == indx[1]:
indx = [int(floor(random()*self.size[0])), int(floor(random()*self.size[0]))]
indx.sort()
            # The individual (line) to be mutated
mutline = new_pop[self.size[1] // 2 + mutate[nr]]
if random() < 0.75: # Gen Abschnitt umdrehen / Turn gene segment
cut = mutline[indx[0]:indx[1] + 1]
cut.reverse()
mutline = mutline[0:indx[0]] + cut + mutline[indx[1] + 1:len(mutline)]
else: # 2 Gene tauschen / 2 Gene exchange
orgline = mutline[:]
mutline[indx[0]] = orgline[indx[1]]
mutline[indx[1]] = orgline[indx[0]]
new_pop[self.size[1] // 2 + mutate[nr]] = mutline
# Assign the new population matrix
self.pop = new_pop
class DistanceMatrixClass:
"""
DistanceMatrixClass
"""
def __init__(self):
self.matrix = []
self.size = [0, 0]
def __str__(self):
string = ("Distance Matrix; size: %i X %i" % (self.size[0], self.size[1]))
for line_x in self.matrix:
string += "\n"
for x_vals in line_x:
string += "%8.2f" % x_vals
return string
def generate_matrix(self, st_end_points):
self.matrix = [[st_end_y[1].distance(st_end_x[0]) for st_end_x in st_end_points]
for st_end_y in st_end_points]
self.size = [len(st_end_points), len(st_end_points)]
class FittnessClass:
def __init__(self, population, cur_fittness, order):
self.population = population
self.cur_fittness = cur_fittness
self.order = order
self.best_fittness = []
self.best_route = []
def __str__(self):
return "\nBest Fittness: %s \nBest Route: %s \nBest Pop: %s"\
% (self.best_fittness[-1], self.best_route, self.population.pop[self.best_route])
def calc_st_fittness(self, matrix, st_pop):
dis = matrix[st_pop[-1]][st_pop[0]]
for nr in range(1, len(st_pop)):
dis += matrix[st_pop[nr - 1]][st_pop[nr]]
self.best_fittness.append(dis)
def calc_cur_fittness(self, matrix):
# logger.debug("Calculating current fittness len(self.population.pop): %s"
# % len(self.population.pop))
# logger.debug("Length of self.cur_fittness: %s" %(len(self.cur_fittness)))
for pop_nr in range(len(self.population.pop)):
pop = self.population.pop[pop_nr]
# logger.debug("pop_nr: %s" %pop_nr)
dis = matrix[pop[-1]][pop[0]]
for nr in range(1, len(pop)):
dis += matrix[pop[nr - 1]][pop[nr]]
self.cur_fittness[pop_nr] = dis
    # Second way to enforce the shape order (correction function = active)
def correct_constrain_order(self):
"""FIXME: in order to change the correction to have all ordered shapes
in begin this might be the best place to change it. Maybe we can also have
an additional option in the config file?"""
for pop in self.population.pop:
# Search the current order
order_index = self.get_pop_index_list(pop)
            # Sort the current order of the indices
order_index.sort()
# Indices according to correct order
for ind_nr in range(len(order_index)):
pop[order_index[ind_nr]] = self.order[ind_nr]
def set_startpoint(self):
n_pts = len(self.population.pop[-1])
for pop in self.population.pop:
st_pt_nr = pop.index(n_pts - 1)
# Contour with the starting point at the beginning
pop[:] = pop[st_pt_nr:n_pts] + pop[0:st_pt_nr]
def get_pop_index_list(self, pop):
return [pop.index(order) for order in self.order]
def select_best_fittness(self):
self.best_fittness.append(min(self.cur_fittness))
self.best_route = self.cur_fittness.index(self.best_fittness[-1])
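# Illustrative sketch (assumption, not part of the module): DistanceMatrixClass expects
# a list of (start_point, end_point) pairs whose points provide a distance() method,
# as the dxf2gcode shapes do; a minimal stand-in point class is used here. The full
# TspOptimization class additionally needs the dxf2gcode config (g.config), so it is
# not exercised in this sketch.
class _DemoPoint(object):
    def __init__(self, x, y):
        self.x, self.y = x, y
    def distance(self, other):
        return ((self.x - other.x) ** 2 + (self.y - other.y) ** 2) ** 0.5

_demo_matrix = DistanceMatrixClass()
_demo_matrix.generate_matrix([(_DemoPoint(0, 0), _DemoPoint(1, 0)),
                              (_DemoPoint(5, 0), _DemoPoint(6, 0))])
# _demo_matrix.matrix[0][1] == 4.0: distance from the end of shape 0 to the start of shape 1.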
| gpl-3.0 |
mujiansu/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Tools/versioncheck/pyversioncheck.py | 98 | 4051 | """pyversioncheck - Module to help with checking versions"""
import types
import rfc822
import urllib
import sys
# Verbose options
VERBOSE_SILENT=0 # Single-line reports per package
VERBOSE_NORMAL=1 # Single-line reports per package, more info if outdated
VERBOSE_EACHFILE=2 # Report on each URL checked
VERBOSE_CHECKALL=3 # Check each URL for each package
# Test directory
## urllib bug: _TESTDIR="ftp://ftp.cwi.nl/pub/jack/python/versiontestdir/"
_TESTDIR="http://www.cwi.nl/~jack/versiontestdir/"
def versioncheck(package, url, version, verbose=0):
ok, newversion, fp = checkonly(package, url, version, verbose)
if verbose > VERBOSE_NORMAL:
return ok
if ok < 0:
print '%s: No correctly formatted current version file found'%(package)
elif ok == 1:
print '%s: up-to-date (version %s)'%(package, version)
else:
print '%s: version %s installed, version %s found:' % \
(package, version, newversion)
if verbose > VERBOSE_SILENT:
while 1:
line = fp.readline()
if not line: break
sys.stdout.write('\t'+line)
return ok
def checkonly(package, url, version, verbose=0):
if verbose >= VERBOSE_EACHFILE:
print '%s:'%package
if type(url) == types.StringType:
ok, newversion, fp = _check1version(package, url, version, verbose)
else:
for u in url:
ok, newversion, fp = _check1version(package, u, version, verbose)
if ok >= 0 and verbose < VERBOSE_CHECKALL:
break
return ok, newversion, fp
def _check1version(package, url, version, verbose=0):
if verbose >= VERBOSE_EACHFILE:
print ' Checking %s'%url
try:
fp = urllib.urlopen(url)
except IOError, arg:
if verbose >= VERBOSE_EACHFILE:
print ' Cannot open:', arg
return -1, None, None
msg = rfc822.Message(fp, seekable=0)
newversion = msg.getheader('current-version')
if not newversion:
if verbose >= VERBOSE_EACHFILE:
print ' No "Current-Version:" header in URL or URL not found'
return -1, None, None
version = version.lower().strip()
newversion = newversion.lower().strip()
if version == newversion:
if verbose >= VERBOSE_EACHFILE:
print ' Version identical (%s)'%newversion
return 1, version, fp
else:
if verbose >= VERBOSE_EACHFILE:
print ' Versions different (installed: %s, new: %s)'% \
(version, newversion)
return 0, newversion, fp
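# Illustrative note (assumption, not shipped with this module): the URL polled by
# _check1version() must serve an RFC822-style header block that rfc822.Message can
# parse, for example:
#
#   Current-Version: 1.1
#
#   Free-text body lines here are printed by versioncheck() when the installed
#   version is out of date.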
def _test():
print '--- TEST VERBOSE=1'
print '--- Testing existing and identical version file'
versioncheck('VersionTestPackage', _TESTDIR+'Version10.txt', '1.0', verbose=1)
print '--- Testing existing package with new version'
versioncheck('VersionTestPackage', _TESTDIR+'Version11.txt', '1.0', verbose=1)
print '--- Testing package with non-existing version file'
versioncheck('VersionTestPackage', _TESTDIR+'nonexistent.txt', '1.0', verbose=1)
print '--- Test package with 2 locations, first non-existing second ok'
versfiles = [_TESTDIR+'nonexistent.txt', _TESTDIR+'Version10.txt']
versioncheck('VersionTestPackage', versfiles, '1.0', verbose=1)
print '--- TEST VERBOSE=2'
print '--- Testing existing and identical version file'
versioncheck('VersionTestPackage', _TESTDIR+'Version10.txt', '1.0', verbose=2)
print '--- Testing existing package with new version'
versioncheck('VersionTestPackage', _TESTDIR+'Version11.txt', '1.0', verbose=2)
print '--- Testing package with non-existing version file'
versioncheck('VersionTestPackage', _TESTDIR+'nonexistent.txt', '1.0', verbose=2)
print '--- Test package with 2 locations, first non-existing second ok'
versfiles = [_TESTDIR+'nonexistent.txt', _TESTDIR+'Version10.txt']
versioncheck('VersionTestPackage', versfiles, '1.0', verbose=2)
if __name__ == '__main__':
_test()
| apache-2.0 |
iHateWEBos/shooter_kernel_34 | scripts/gcc-wrapper.py | 501 | 3410 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"alignment.c:327",
"mmu.c:602",
"return_address.c:62",
])
# Capture the name of the object file, so we can find and remove it if a forbidden warning appears.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
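# Illustrative example (assumption): a diagnostic such as
#   some/dir/alignment.c:327:12: warning: unused variable 'foo'
# matches warning_re with group(2) == "alignment.c:327", which is then looked up in
# allowed_warnings above.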
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
| gpl-2.0 |
yigitguler/django | tests/forms_tests/tests/test_util.py | 12 | 4366 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
from django.core.exceptions import ValidationError
from django.forms.utils import flatatt, ErrorDict, ErrorList
from django.test import TestCase
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.translation import ugettext_lazy
from django.utils.encoding import python_2_unicode_compatible
class FormsUtilTestCase(TestCase):
# Tests for forms/utils.py module.
def test_flatatt(self):
###########
# flatatt #
###########
self.assertEqual(flatatt({'id': "header"}), ' id="header"')
self.assertEqual(flatatt({'class': "news", 'title': "Read this"}), ' class="news" title="Read this"')
self.assertEqual(flatatt({'class': "news", 'title': "Read this", 'required': "required"}), ' class="news" required="required" title="Read this"')
self.assertEqual(flatatt({'class': "news", 'title': "Read this", 'required': True}), ' class="news" title="Read this" required')
self.assertEqual(flatatt({'class': "news", 'title': "Read this", 'required': False}), ' class="news" title="Read this"')
self.assertEqual(flatatt({}), '')
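        # Illustrative note (assumption, not part of the test): flatatt() is the helper
        # widgets use to turn an attrs dict into the attribute string of a tag, e.g.
        # '<div%s>' % flatatt({'id': 'header'}) renders as '<div id="header">'.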
def test_validation_error(self):
###################
# ValidationError #
###################
# Can take a string.
self.assertHTMLEqual(str(ErrorList(ValidationError("There was an error.").messages)),
'<ul class="errorlist"><li>There was an error.</li></ul>')
# Can take a unicode string.
self.assertHTMLEqual(six.text_type(ErrorList(ValidationError("Not \u03C0.").messages)),
'<ul class="errorlist"><li>Not π.</li></ul>')
# Can take a lazy string.
self.assertHTMLEqual(str(ErrorList(ValidationError(ugettext_lazy("Error.")).messages)),
'<ul class="errorlist"><li>Error.</li></ul>')
# Can take a list.
self.assertHTMLEqual(str(ErrorList(ValidationError(["Error one.", "Error two."]).messages)),
'<ul class="errorlist"><li>Error one.</li><li>Error two.</li></ul>')
# Can take a mixture in a list.
self.assertHTMLEqual(str(ErrorList(ValidationError(["First error.", "Not \u03C0.", ugettext_lazy("Error.")]).messages)),
'<ul class="errorlist"><li>First error.</li><li>Not π.</li><li>Error.</li></ul>')
@python_2_unicode_compatible
class VeryBadError:
def __str__(self):
return "A very bad error."
# Can take a non-string.
self.assertHTMLEqual(str(ErrorList(ValidationError(VeryBadError()).messages)),
'<ul class="errorlist"><li>A very bad error.</li></ul>')
# Escapes non-safe input but not input marked safe.
example = 'Example of link: <a href="http://www.example.com/">example</a>'
self.assertHTMLEqual(str(ErrorList([example])),
'<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorList([mark_safe(example)])),
'<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorDict({'name': example})),
'<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorDict({'name': mark_safe(example)})),
'<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
def test_error_dict_copy(self):
e = ErrorDict()
e['__all__'] = ErrorList([
ValidationError(
message='message %(i)s',
params={'i': 1},
),
ValidationError(
message='message %(i)s',
params={'i': 2},
),
])
e_copy = copy.copy(e)
self.assertEqual(e, e_copy)
self.assertEqual(e.as_data(), e_copy.as_data())
e_deepcopy = copy.deepcopy(e)
self.assertEqual(e, e_deepcopy)
self.assertEqual(e.as_data(), e_copy.as_data())
| bsd-3-clause |
seibert/numba | numba/roc/stubs.py | 4 | 2822 | from numba.core import types, typing, ir
_stub_error = NotImplementedError("This is a stub.")
def get_global_id(*args, **kargs):
"""
OpenCL get_global_id()
"""
raise _stub_error
def get_local_id(*args, **kargs):
"""
OpenCL get_local_id()
"""
raise _stub_error
def get_global_size(*args, **kargs):
"""
OpenCL get_global_size()
"""
raise _stub_error
def get_local_size(*args, **kargs):
"""
OpenCL get_local_size()
"""
raise _stub_error
def get_group_id(*args, **kargs):
"""
OpenCL get_group_id()
"""
raise _stub_error
def get_num_groups(*args, **kargs):
"""
OpenCL get_num_groups()
"""
raise _stub_error
def get_work_dim(*args, **kargs):
"""
OpenCL get_work_dim()
"""
raise _stub_error
def barrier(*args, **kargs):
"""
OpenCL barrier()
Example:
# workgroup barrier + local memory fence
hsa.barrier(hsa.CLK_LOCAL_MEM_FENCE)
# workgroup barrier + global memory fence
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
        # workgroup barrier with the default memory fence
hsa.barrier()
"""
raise _stub_error
def mem_fence(*args, **kargs):
"""
OpenCL mem_fence()
Example:
# local memory fence
hsa.mem_fence(hsa.CLK_LOCAL_MEM_FENCE)
# global memory fence
hsa.mem_fence(hsa.CLK_GLOBAL_MEM_FENCE)
"""
raise _stub_error
def wavebarrier():
"""
HSAIL wavebarrier
"""
raise _stub_error
def activelanepermute_wavewidth(src, laneid, identity, useidentity):
"""
HSAIL activelanepermute_wavewidth_*
"""
raise _stub_error
def ds_permute(src_lane, dest_lane):
"""
AMDGCN Data Share intrinsic forwards permute (push semantics)
"""
raise _stub_error
def ds_bpermute(src_lane, dest_lane):
"""
AMDGCN Data Share intrinsic backwards permute (pull semantics)
"""
raise _stub_error
class Stub(object):
"""A stub object to represent special objects which is meaningless
outside the context of HSA-python.
"""
_description_ = '<ptx special value>'
__slots__ = () # don't allocate __dict__
def __new__(cls):
raise NotImplementedError("%s is not instantiable" % cls)
def __repr__(self):
return self._description_
class shared(Stub):
"""shared namespace
"""
_description_ = '<shared>'
def array(shape, dtype):
"""shared.array(shape, dtype)
Allocate a shared memory array.
"""
#-------------------------------------------------------------------------------
# atomic
class atomic(Stub):
"""atomic namespace
"""
_description_ = '<atomic>'
class add(Stub):
"""add(ary, idx, val)
Perform atomic ary[idx] += val
"""
| bsd-2-clause |
wldcordeiro/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/runner.py | 173 | 17105 | """ basic collect and runtest protocol implementations """
import bdb
import sys
from time import time
import py
import pytest
from _pytest._code.code import TerminalRepr, ExceptionInfo
def pytest_namespace():
return {
'fail' : fail,
'skip' : skip,
'importorskip' : importorskip,
'exit' : exit,
}
#
# pytest plugin hooks
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group.addoption('--durations',
action="store", type=int, default=None, metavar="N",
help="show N slowest setup/test durations (N=0 for all)."),
def pytest_terminal_summary(terminalreporter):
durations = terminalreporter.config.option.durations
if durations is None:
return
tr = terminalreporter
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, 'duration'):
dlist.append(rep)
if not dlist:
return
dlist.sort(key=lambda x: x.duration)
dlist.reverse()
if not durations:
tr.write_sep("=", "slowest test durations")
else:
tr.write_sep("=", "slowest %s test durations" % durations)
dlist = dlist[:durations]
for rep in dlist:
nodeid = rep.nodeid.replace("::()::", "::")
tr.write_line("%02.2fs %-8s %s" %
(rep.duration, rep.when, nodeid))
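# Illustrative example (assumption, not part of the plugin code): the hook above is
# what produces the report for "pytest --durations=3", e.g.
#
#   ========== slowest 3 test durations ==========
#   1.32s call     test_example.py::test_slow_io
#   0.45s setup    test_example.py::test_slow_io
#   0.02s teardown test_example.py::test_slow_io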
def pytest_sessionstart(session):
session._setupstate = SetupState()
def pytest_sessionfinish(session):
session._setupstate.teardown_all()
class NodeInfo:
def __init__(self, location):
self.location = location
def pytest_runtest_protocol(item, nextitem):
item.ihook.pytest_runtest_logstart(
nodeid=item.nodeid, location=item.location,
)
runtestprotocol(item, nextitem=nextitem)
return True
def runtestprotocol(item, log=True, nextitem=None):
hasrequest = hasattr(item, "_request")
if hasrequest and not item._request:
item._initrequest()
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log,
nextitem=nextitem))
# after all teardown hooks have been called
# want funcargs and request info to go away
if hasrequest:
item._request = False
item.funcargs = None
return reports
def pytest_runtest_setup(item):
item.session._setupstate.prepare(item)
def pytest_runtest_call(item):
try:
item.runtest()
except Exception:
# Store trace info to allow postmortem debugging
type, value, tb = sys.exc_info()
tb = tb.tb_next # Skip *this* frame
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
del tb # Get rid of it in this namespace
raise
def pytest_runtest_teardown(item, nextitem):
item.session._setupstate.teardown_exact(item, nextitem)
def pytest_report_teststatus(report):
if report.when in ("setup", "teardown"):
if report.failed:
# category, shortletter, verbose-word
return "error", "E", "ERROR"
elif report.skipped:
return "skipped", "s", "SKIPPED"
else:
return "", "", ""
#
# Implementation
def call_and_report(item, when, log=True, **kwds):
call = call_runtest_hook(item, when, **kwds)
hook = item.ihook
report = hook.pytest_runtest_makereport(item=item, call=call)
if log:
hook.pytest_runtest_logreport(report=report)
if check_interactive_exception(call, report):
hook.pytest_exception_interact(node=item, call=call, report=report)
return report
def check_interactive_exception(call, report):
return call.excinfo and not (
hasattr(report, "wasxfail") or
call.excinfo.errisinstance(skip.Exception) or
call.excinfo.errisinstance(bdb.BdbQuit))
def call_runtest_hook(item, when, **kwds):
hookname = "pytest_runtest_" + when
ihook = getattr(item.ihook, hookname)
return CallInfo(lambda: ihook(item=item, **kwds), when=when)
class CallInfo:
""" Result/Exception info a function invocation. """
#: None or ExceptionInfo object.
excinfo = None
def __init__(self, func, when):
#: context of invocation: one of "setup", "call",
#: "teardown", "memocollect"
self.when = when
self.start = time()
try:
self.result = func()
except KeyboardInterrupt:
self.stop = time()
raise
except:
self.excinfo = ExceptionInfo()
self.stop = time()
def __repr__(self):
if self.excinfo:
status = "exception: %s" % str(self.excinfo.value)
else:
status = "result: %r" % (self.result,)
return "<CallInfo when=%r %s>" % (self.when, status)
def getslaveinfoline(node):
try:
return node._slaveinfocache
except AttributeError:
d = node.slaveinfo
ver = "%s.%s.%s" % d['version_info'][:3]
node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
d['id'], d['sysplatform'], ver, d['executable'])
return s
class BaseReport(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def toterminal(self, out):
if hasattr(self, 'node'):
out.line(getslaveinfoline(self.node))
longrepr = self.longrepr
if longrepr is None:
return
if hasattr(longrepr, 'toterminal'):
longrepr.toterminal(out)
else:
try:
out.line(longrepr)
except UnicodeEncodeError:
out.line("<unprintable longrepr>")
def get_sections(self, prefix):
for name, content in self.sections:
if name.startswith(prefix):
yield prefix, content
passed = property(lambda x: x.outcome == "passed")
failed = property(lambda x: x.outcome == "failed")
skipped = property(lambda x: x.outcome == "skipped")
@property
def fspath(self):
return self.nodeid.split("::")[0]
def pytest_runtest_makereport(item, call):
when = call.when
duration = call.stop-call.start
keywords = dict([(x,1) for x in item.keywords])
excinfo = call.excinfo
sections = []
if not call.excinfo:
outcome = "passed"
longrepr = None
else:
if not isinstance(excinfo, ExceptionInfo):
outcome = "failed"
longrepr = excinfo
elif excinfo.errisinstance(pytest.skip.Exception):
outcome = "skipped"
r = excinfo._getreprcrash()
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
if call.when == "call":
longrepr = item.repr_failure(excinfo)
else: # exception in setup or teardown
longrepr = item._repr_failure_py(excinfo,
style=item.config.option.tbstyle)
for rwhen, key, content in item._report_sections:
sections.append(("Captured %s %s" %(key, rwhen), content))
return TestReport(item.nodeid, item.location,
keywords, outcome, longrepr, when,
sections, duration)
class TestReport(BaseReport):
""" Basic test report object (also used for setup and teardown calls if
they fail).
"""
def __init__(self, nodeid, location, keywords, outcome,
longrepr, when, sections=(), duration=0, **extra):
#: normalized collection node id
self.nodeid = nodeid
#: a (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
self.location = location
#: a name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
self.keywords = keywords
#: test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
#: one of 'setup', 'call', 'teardown' to indicate runtest phase.
self.when = when
#: list of (secname, data) extra information which needs to
        #: be marshallable
self.sections = list(sections)
#: time it took to run just the test
self.duration = duration
self.__dict__.update(extra)
def __repr__(self):
return "<TestReport %r when=%r outcome=%r>" % (
self.nodeid, self.when, self.outcome)
class TeardownErrorReport(BaseReport):
outcome = "failed"
when = "teardown"
def __init__(self, longrepr, **extra):
self.longrepr = longrepr
self.sections = []
self.__dict__.update(extra)
def pytest_make_collect_report(collector):
call = CallInfo(collector._memocollect, "memocollect")
longrepr = None
if not call.excinfo:
outcome = "passed"
else:
from _pytest import nose
skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
if call.excinfo.errisinstance(skip_exceptions):
outcome = "skipped"
r = collector._repr_failure_py(call.excinfo, "line").reprcrash
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
errorinfo = collector.repr_failure(call.excinfo)
if not hasattr(errorinfo, "toterminal"):
errorinfo = CollectErrorRepr(errorinfo)
longrepr = errorinfo
rep = CollectReport(collector.nodeid, outcome, longrepr,
getattr(call, 'result', None))
rep.call = call # see collect_one_node
return rep
class CollectReport(BaseReport):
def __init__(self, nodeid, outcome, longrepr, result,
sections=(), **extra):
self.nodeid = nodeid
self.outcome = outcome
self.longrepr = longrepr
self.result = result or []
self.sections = list(sections)
self.__dict__.update(extra)
@property
def location(self):
return (self.fspath, None, self.fspath)
def __repr__(self):
return "<CollectReport %r lenresult=%s outcome=%r>" % (
self.nodeid, len(self.result), self.outcome)
class CollectErrorRepr(TerminalRepr):
def __init__(self, msg):
self.longrepr = msg
def toterminal(self, out):
out.line(self.longrepr, red=True)
class SetupState(object):
""" shared state for setting up/tearing down test items or collectors. """
def __init__(self):
self.stack = []
self._finalizers = {}
def addfinalizer(self, finalizer, colitem):
""" attach a finalizer to the given colitem.
if colitem is None, this will add a finalizer that
is called at the end of teardown_all().
"""
assert colitem and not isinstance(colitem, tuple)
assert py.builtin.callable(finalizer)
#assert colitem in self.stack # some unit tests don't setup stack :/
self._finalizers.setdefault(colitem, []).append(finalizer)
def _pop_and_teardown(self):
colitem = self.stack.pop()
self._teardown_with_finalization(colitem)
def _callfinalizers(self, colitem):
finalizers = self._finalizers.pop(colitem, None)
exc = None
while finalizers:
fin = finalizers.pop()
try:
fin()
except Exception:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = sys.exc_info()
if exc:
py.builtin._reraise(*exc)
def _teardown_with_finalization(self, colitem):
self._callfinalizers(colitem)
if hasattr(colitem, "teardown"):
colitem.teardown()
for colitem in self._finalizers:
assert colitem is None or colitem in self.stack \
or isinstance(colitem, tuple)
def teardown_all(self):
while self.stack:
self._pop_and_teardown()
for key in list(self._finalizers):
self._teardown_with_finalization(key)
assert not self._finalizers
def teardown_exact(self, item, nextitem):
needed_collectors = nextitem and nextitem.listchain() or []
self._teardown_towards(needed_collectors)
def _teardown_towards(self, needed_collectors):
while self.stack:
if self.stack == needed_collectors[:len(self.stack)]:
break
self._pop_and_teardown()
def prepare(self, colitem):
""" setup objects along the collector chain to the test-method
and teardown previously setup objects."""
needed_collectors = colitem.listchain()
self._teardown_towards(needed_collectors)
# check if the last collection node has raised an error
for col in self.stack:
if hasattr(col, '_prepare_exc'):
py.builtin._reraise(*col._prepare_exc)
for col in needed_collectors[len(self.stack):]:
self.stack.append(col)
try:
col.setup()
except Exception:
col._prepare_exc = sys.exc_info()
raise
def collect_one_node(collector):
ihook = collector.ihook
ihook.pytest_collectstart(collector=collector)
rep = ihook.pytest_make_collect_report(collector=collector)
call = rep.__dict__.pop("call", None)
if call and check_interactive_exception(call, rep):
ihook.pytest_exception_interact(node=collector, call=call, report=rep)
return rep
# =============================================================
# Test OutcomeExceptions and helpers for creating them.
class OutcomeException(Exception):
""" OutcomeException and its subclass instances indicate and
contain info about test and collection outcomes.
"""
def __init__(self, msg=None, pytrace=True):
Exception.__init__(self, msg)
self.msg = msg
self.pytrace = pytrace
def __repr__(self):
if self.msg:
val = self.msg
if isinstance(val, bytes):
val = py._builtin._totext(val, errors='replace')
return val
return "<%s instance>" %(self.__class__.__name__,)
__str__ = __repr__
class Skipped(OutcomeException):
# XXX hackish: on 3k we fake to live in the builtins
# in order to have Skipped exception printing shorter/nicer
__module__ = 'builtins'
class Failed(OutcomeException):
""" raised from an explicit call to pytest.fail() """
__module__ = 'builtins'
class Exit(KeyboardInterrupt):
""" raised for immediate program exits (no tracebacks/summaries)"""
def __init__(self, msg="unknown reason"):
self.msg = msg
KeyboardInterrupt.__init__(self, msg)
# exposed helper methods
def exit(msg):
""" exit testing process as if KeyboardInterrupt was triggered. """
__tracebackhide__ = True
raise Exit(msg)
exit.Exception = Exit
def skip(msg=""):
""" skip an executing test with the given message. Note: it's usually
better to use the pytest.mark.skipif marker to declare a test to be
skipped under certain conditions like mismatching platforms or
dependencies. See the pytest_skipping plugin for details.
"""
__tracebackhide__ = True
raise Skipped(msg=msg)
skip.Exception = Skipped
def fail(msg="", pytrace=True):
""" explicitly fail an currently-executing test with the given Message.
:arg pytrace: if false the msg represents the full failure information
and no python traceback will be reported.
"""
__tracebackhide__ = True
raise Failed(msg=msg, pytrace=pytrace)
fail.Exception = Failed
def importorskip(modname, minversion=None):
""" return imported module if it has at least "minversion" as its
    __version__ attribute. If no minversion is specified, a skip
    is only triggered if the module can not be imported.
"""
__tracebackhide__ = True
compile(modname, '', 'eval') # to catch syntaxerrors
try:
__import__(modname)
except ImportError:
skip("could not import %r" %(modname,))
mod = sys.modules[modname]
if minversion is None:
return mod
verattr = getattr(mod, '__version__', None)
if minversion is not None:
try:
from pkg_resources import parse_version as pv
except ImportError:
skip("we have a required version for %r but can not import "
"no pkg_resources to parse version strings." %(modname,))
if verattr is None or pv(verattr) < pv(minversion):
skip("module %r has __version__ %r, required is: %r" %(
modname, verattr, minversion))
return mod
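# Illustrative usage of the helpers above (a sketch; the module name and version
# below are example values, not something this file depends on):
#   docutils = pytest.importorskip("docutils", minversion="0.3")
#   pytest.skip("optional dependency not installed")
#   pytest.fail("unexpected response", pytrace=False)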
| mpl-2.0 |
rjschwei/azure-sdk-for-python | azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly.py | 3 | 2832 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .catalog_item import CatalogItem
class USqlAssembly(CatalogItem):
"""A Data Lake Analytics catalog U-SQL Assembly.
:param compute_account_name: the name of the Data Lake Analytics account.
:type compute_account_name: str
:param version: the version of the catalog item.
:type version: str
:param database_name: the name of the database.
:type database_name: str
:param name: the name of the assembly.
:type name: str
:param clr_name: the name of the CLR.
:type clr_name: str
:param is_visible: the switch indicating if this assembly is visible or
not.
:type is_visible: bool
:param is_user_defined: the switch indicating if this assembly is user
defined or not.
:type is_user_defined: bool
:param files: the list of files associated with the assembly
:type files: list of :class:`USqlAssemblyFileInfo
<azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyFileInfo>`
:param dependencies: the list of dependencies associated with the assembly
:type dependencies: list of :class:`USqlAssemblyDependencyInfo
<azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyDependencyInfo>`
"""
_attribute_map = {
'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'database_name': {'key': 'databaseName', 'type': 'str'},
'name': {'key': 'assemblyName', 'type': 'str'},
'clr_name': {'key': 'clrName', 'type': 'str'},
'is_visible': {'key': 'isVisible', 'type': 'bool'},
'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'},
'files': {'key': 'files', 'type': '[USqlAssemblyFileInfo]'},
'dependencies': {'key': 'dependencies', 'type': '[USqlAssemblyDependencyInfo]'},
}
def __init__(self, compute_account_name=None, version=None, database_name=None, name=None, clr_name=None, is_visible=None, is_user_defined=None, files=None, dependencies=None):
super(USqlAssembly, self).__init__(compute_account_name=compute_account_name, version=version)
self.database_name = database_name
self.name = name
self.clr_name = clr_name
self.is_visible = is_visible
self.is_user_defined = is_user_defined
self.files = files
self.dependencies = dependencies
| mit |
BlueCrystalLabs/bgfx | 3rdparty/scintilla/test/lexTests.py | 65 | 3416 | # -*- coding: utf-8 -*-
# Requires Python 2.7 or later
import io, os, sys, unittest
if sys.platform == "win32":
import XiteWin as Xite
else:
import XiteQt as Xite
keywordsHTML = [
b"b body content head href html link meta "
b"name rel script strong title type xmlns",
b"function",
b"sub"
]
class TestLexers(unittest.TestCase):
def setUp(self):
self.xite = Xite.xiteFrame
self.ed = self.xite.ed
self.ed.ClearAll()
self.ed.EmptyUndoBuffer()
def AsStyled(self):
text = self.ed.Contents()
data = io.BytesIO()
prevStyle = -1
for o in range(self.ed.Length):
styleNow = self.ed.GetStyleAt(o)
if styleNow != prevStyle:
styleBuf = "{%0d}" % styleNow
data.write(styleBuf.encode('utf-8'))
prevStyle = styleNow
data.write(text[o:o+1])
return data.getvalue()
def LexExample(self, name, lexerName, keywords=None):
if keywords is None:
keywords = []
self.ed.SetCodePage(65001)
self.ed.LexerLanguage = lexerName
bits = self.ed.StyleBitsNeeded
mask = 2 << bits - 1
self.ed.StyleBits = bits
for i in range(len(keywords)):
self.ed.SetKeyWords(i, keywords[i])
nameExample = os.path.join("examples", name)
namePrevious = nameExample +".styled"
nameNew = nameExample +".new"
with open(nameExample, "rb") as f:
prog = f.read()
BOM = b"\xEF\xBB\xBF"
if prog.startswith(BOM):
prog = prog[len(BOM):]
lenDocument = len(prog)
self.ed.AddText(lenDocument, prog)
self.ed.Colourise(0, lenDocument)
self.assertEquals(self.ed.EndStyled, lenDocument)
try:
with open(namePrevious, "rb") as f:
prevStyled = f.read()
except FileNotFoundError:
prevStyled = ""
progStyled = self.AsStyled()
if progStyled != prevStyled:
with open(nameNew, "wb") as f:
f.write(progStyled)
print(progStyled)
print(prevStyled)
self.assertEquals(progStyled, prevStyled)
# The whole file doesn't parse like it did before so don't try line by line
# as that is likely to fail many times.
return
# Try partial lexes from the start of every line which should all be identical.
for line in range(self.ed.LineCount):
lineStart = self.ed.PositionFromLine(line)
self.ed.StartStyling(lineStart, mask)
self.assertEquals(self.ed.EndStyled, lineStart)
self.ed.Colourise(lineStart, lenDocument)
progStyled = self.AsStyled()
if progStyled != prevStyled:
with open(nameNew, "wb") as f:
f.write(progStyled)
self.assertEquals(progStyled, prevStyled)
# Give up after one failure
return
def testCXX(self):
self.LexExample("x.cxx", b"cpp", [b"int"])
def testPython(self):
self.LexExample("x.py", b"python",
[b"class def else for if import in print return while"])
def testHTML(self):
self.LexExample("x.html", b"hypertext", keywordsHTML)
def testASP(self):
self.LexExample("x.asp", b"hypertext", keywordsHTML)
def testPHP(self):
self.LexExample("x.php", b"hypertext", keywordsHTML)
def testVB(self):
self.LexExample("x.vb", b"vb", [b"as dim or string"])
def testLua(self):
self.LexExample("x.lua", b"lua", [b"function end"])
def testRuby(self):
self.LexExample("x.rb", b"ruby", [b"class def end"])
def testPerl(self):
self.LexExample("x.pl", b"perl", [b"printf sleep use while"])
def testD(self):
self.LexExample("x.d", b"d",
[b"keyword1", b"keyword2", b"", b"keyword4", b"keyword5",
b"keyword6", b"keyword7"])
if __name__ == '__main__':
Xite.main("lexTests")
| bsd-2-clause |
lightcn/odoo | addons/resource/__init__.py | 448 | 1086 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import resource
import faces
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sh1nu11bi/RATDecoders | BlackShades.py | 8 | 5801 | #!/usr/bin/env python
'''
BlackShades RAT Decoder
Original Script by Brian Wallace (@botnet_hunter)
'''
__description__ = 'BlackShades RAT Config Extractor\nOriginal Script by Brian Wallace (@botnet_hunter)'
__author__ = 'Kevin Breen http://techanarchy.net'
__OrigionalCode__ = 'v1.0.0 by Brian Wallace (@botnet_hunter)'
__version__ = '0.1'
__date__ = '2014/05/23'
import os
import sys
import string
import re
from optparse import OptionParser
prng_seed = 0
def is_valid_config(config):
if config[:3] != "\x0c\x0c\x0c":
return False
if config.count("\x0C\x0C\x0C") < 15:
return False
return True
def get_next_rng_value():
global prng_seed
prng_seed = ((prng_seed * 1140671485 + 12820163) & 0xffffff)
return prng_seed / 65536
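# Note: get_next_rng_value() is a 24-bit linear congruential generator,
# state = (state * 1140671485 + 12820163) mod 2**24, returning its top 8 bits.
# decrypt_configuration() below brute-forces the seed by requiring the first
# three decrypted bytes to equal the expected 0x0c 0x0c 0x0c header.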
def decrypt_configuration(hex):
global prng_seed
ascii = hex.decode('hex')
tail = ascii[0x20:]
pre_check = []
for x in xrange(3):
pre_check.append(ord(tail[x]) ^ 0x0c)
for x in xrange(0xffffff):
prng_seed = x
if get_next_rng_value() != pre_check[0] or get_next_rng_value() != pre_check[1] or get_next_rng_value() != pre_check[2]:
continue
prng_seed = x
config = "".join((chr(ord(c) ^ int(get_next_rng_value())) for c in tail))
if is_valid_config(config):
return config.split("\x0c\x0c\x0c")
return None
def config_extract(raw_data):
config_pattern = re.findall('[0-9a-fA-F]{154,}', raw_data)
for s in config_pattern:
if (len(s) % 2) == 1:
s = s[:-1]
return s
def config_parser(config):
config_dict = {}
config_dict['Domain'] = config[1]
config_dict['Client Control Port'] = config[2]
config_dict['Client Transfer Port'] = config[3]
config_dict['Campaign ID'] = config[4]
config_dict['File Name'] = config[5]
config_dict['Install Path'] = config[6]
config_dict['Registry Key'] = config[7]
config_dict['ActiveX Key'] = config[8]
config_dict['Install Flag'] = config[9]
config_dict['Hide File'] = config[10]
config_dict['Melt File'] = config[11]
config_dict['Delay'] = config[12]
config_dict['USB Spread'] = config[13]
config_dict['Mutex'] = config[14]
config_dict['Log File'] = config[15]
config_dict['Folder Name'] = config[16]
config_dict['Smart DNS'] = config[17]
config_dict['Protect Process'] = config[18]
return config_dict
def run(data):
raw_config = config_extract(data)
config = decrypt_configuration(raw_config)
if config is not None and len(config) > 15:
sorted_config = config_parser(config)
return sorted_config
return None
#Recursive Function Goes Here
def runRecursive(folder, output):
counter1 = 0
counter2 = 0
print "[+] Writing Configs to File {0}".format(output)
with open(output, 'a+') as out:
#This line will need changing per Decoder
out.write("File Name, Campaign ID, Domain, Transfer Port, Control Port, File Name, Install Path, Registry Key, ActiveX Key, Install Flag, Hide File, Melt File, Delay, USB Spread, Mutex, Log File, Folder Name, Smart DNS, Protect Process\n")
for server in os.listdir(folder):
fileData = open(os.path.join(folder,server), 'rb').read()
configOut = run(fileData)
if configOut != None:
#This line will need changing per Decoder
out.write('{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11},{12},{13},{14},{15},{16},{17},{18}\n'.format(server, configOut["Campaign ID"],configOut["Domain"],configOut["Client Transfer Port"],configOut["Client Control Port"],configOut["File Name"],configOut["Install Path"],configOut["Registry Key"],configOut["ActiveX Key"],configOut["Install Flag"],configOut["Hide File"],configOut["Melt File"],configOut["Delay"],configOut["USB Spread"],configOut["Mutex"],configOut["Log File"],configOut["Folder Name"],configOut["Smart DNS"],configOut["Protect Process"]))
counter1 += 1
counter2 += 1
print "[+] Decoded {0} out of {1} Files".format(counter1, counter2)
return "Complete"
if __name__ == "__main__":
parser = OptionParser(usage='usage: %prog inFile outConfig\n' + __description__, version='%prog ' + __version__)
parser.add_option("-r", "--recursive", action='store_true', default=False, help="Recursive Mode")
(options, args) = parser.parse_args()
# If we dont have args quit with help page
if len(args) > 0:
pass
else:
parser.print_help()
sys.exit()
# if we want a recursive extract run this function
if options.recursive == True:
if len(args) == 2:
runRecursive(args[0], args[1])
sys.exit()
else:
print "[+] You need to specify Both Dir to read AND Output File"
parser.print_help()
sys.exit()
# If not recurisve try to open file
try:
print "[+] Reading file"
fileData = open(args[0], 'rb').read()
except:
print "[+] Couldn't Open File {0}".format(args[0])
sys.exit()
#Run the config extraction
print "[+] Searching for Config"
config = run(fileData)
#If we have a config figure out where to dump it out.
if config == None:
print "[+] Config not found"
sys.exit()
#if you gave me two args im going to assume the 2nd arg is where you want to save the file
if len(args) == 2:
print "[+] Writing Config to file {0}".format(args[1])
with open(args[1], 'a') as outFile:
for key, value in sorted(config.iteritems()):
clean_value = filter(lambda x: x in string.printable, value)
outFile.write("Key: {0}\t Value: {1}\n".format(key,clean_value))
# if no seconds arg then assume you want it printing to screen
else:
print "[+] Printing Config to screen"
for key, value in sorted(config.iteritems()):
clean_value = filter(lambda x: x in string.printable, value)
print " [-] Key: {0}\t Value: {1}".format(key,clean_value)
print "[+] End of Config"
| gpl-3.0 |
tornadozou/tensorflow | tensorflow/contrib/signal/python/ops/spectral_ops.py | 15 | 7918 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spectral operations (e.g. Short-time Fourier Transform)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.contrib.signal.python.ops import reconstruction_ops
from tensorflow.contrib.signal.python.ops import shape_ops
from tensorflow.contrib.signal.python.ops import window_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops
def stft(signals, frame_length, frame_step, fft_length=None,
window_fn=functools.partial(window_ops.hann_window, periodic=True),
pad_end=False, name=None):
"""Computes the [Short-time Fourier Transform][stft] of `signals`.
Implemented with GPU-compatible ops and supports gradients.
Args:
signals: A `[..., samples]` `float32` `Tensor` of real-valued signals.
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT to apply.
If not provided, uses the smallest power of 2 enclosing `frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
pad_end: Whether to pad the end of `signals` with zeros when the provided
frame length and step produces a frame that lies partially past its end.
name: An optional name for the operation.
Returns:
A `[..., frames, fft_unique_bins]` `Tensor` of `complex64` STFT values where
`fft_unique_bins` is `fft_length // 2 + 1` (the unique components of the
FFT).
Raises:
ValueError: If `signals` is not at least rank 1, `frame_length` is
not scalar, `frame_step` is not scalar, or `frame_length`
is greater than `fft_length`.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'stft', [signals, frame_length,
frame_step]):
signals = ops.convert_to_tensor(signals, name='signals')
signals.shape.with_rank_at_least(1)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
frame_length_static = tensor_util.constant_value(
frame_length)
fft_length_static = tensor_util.constant_value(fft_length)
if (frame_length_static is not None and fft_length_static is not None and
frame_length_static > fft_length_static):
raise ValueError('frame_length (%d) may not be larger than '
'fft_length (%d)' % (frame_length_static,
fft_length_static))
framed_signals = shape_ops.frame(
signals, frame_length, frame_step, pad_end=pad_end)
# Optionally window the framed signals.
if window_fn is not None:
window = window_fn(frame_length, dtype=framed_signals.dtype)
framed_signals *= window
# spectral_ops.rfft produces the (fft_length/2 + 1) unique components of the
# FFT of the real windowed signals in framed_signals.
return spectral_ops.rfft(framed_signals, [fft_length])
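# Illustrative usage (a sketch; the shapes and parameter values are assumptions):
#   signals = tf.random_normal([1, 16000])                      # [batch, samples]
#   stfts = tf.contrib.signal.stft(signals, frame_length=256, frame_step=128)
#   # -> complex64 tensor of shape [1, frames, 129], since 129 == 256 // 2 + 1.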
def inverse_stft(stfts,
frame_length,
frame_step,
fft_length=None,
window_fn=functools.partial(window_ops.hann_window,
periodic=True),
name=None):
"""Computes the inverse [Short-time Fourier Transform][stft] of `stfts`.
Implemented with GPU-compatible ops and supports gradients.
Args:
stfts: A `complex64` `[..., frames, fft_unique_bins]` `Tensor` of STFT bins
representing a batch of `fft_length`-point STFTs where `fft_unique_bins`
is `fft_length // 2 + 1`
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT that produced
`stfts`. If not provided, uses the smallest power of 2 enclosing
`frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
name: An optional name for the operation.
Returns:
A `[..., samples]` `Tensor` of `float32` signals representing the inverse
STFT for each input STFT in `stfts`.
Raises:
ValueError: If `stfts` is not at least rank 2, `frame_length` is not scalar,
`frame_step` is not scalar, or `fft_length` is not scalar, or
`frame_length` is greater than `fft_length`.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'inverse_stft', [stfts]):
stfts = ops.convert_to_tensor(stfts, name='stfts')
stfts.shape.with_rank_at_least(2)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
fft_length.shape.assert_has_rank(0)
frame_length_static = tensor_util.constant_value(
frame_length)
fft_length_static = tensor_util.constant_value(fft_length)
if (frame_length_static is not None and fft_length_static is not None and
frame_length_static > fft_length_static):
raise ValueError('frame_length (%d) may not be larger than '
'fft_length (%d)' % (frame_length_static,
fft_length_static))
real_frames = spectral_ops.irfft(stfts, [fft_length])[..., :frame_length]
# Optionally window and overlap-add the inner 2 dimensions of real_frames
# into a single [samples] dimension.
if window_fn is not None:
window = window_fn(frame_length, dtype=stfts.dtype.real_dtype)
real_frames *= window
return reconstruction_ops.overlap_and_add(real_frames, frame_step)
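# Illustrative round trip (a sketch; parameters must match the forward stft call,
# and exact reconstruction generally also requires a matching inverse window):
#   stfts = tf.contrib.signal.stft(signals, frame_length=256, frame_step=64)
#   reconstructed = tf.contrib.signal.inverse_stft(stfts, frame_length=256, frame_step=64)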
def _enclosing_power_of_two(value):
"""Return 2**N for integer N such that 2**N >= value."""
value_static = tensor_util.constant_value(value)
if value_static is not None:
return constant_op.constant(
int(2**np.ceil(np.log(value_static) / np.log(2.0))), value.dtype)
return math_ops.cast(
math_ops.pow(2.0, math_ops.ceil(
math_ops.log(math_ops.to_float(value)) / math_ops.log(2.0))),
value.dtype)
| apache-2.0 |
pism/pism | examples/inverse/test_invssa_gn.py | 1 | 5837 | #! /usr/bin/env python3
#
# Copyright (C) 2012, 2014, 2015, 2016, 2017, 2018, 2019 David Maxwell
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import petsc4py
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
import os
import math
import PISM
def adjustTauc(mask, tauc):
"""Where ice is floating or land is ice-free, tauc should be adjusted to have some preset default values."""
grid = mask.grid()
high_tauc = grid.ctx().config().get_number("basal_yield_stress.ice_free_bedrock")
with PISM.vec.Access(comm=tauc, nocomm=mask):
for (i, j) in grid.points():
if mask.ocean(i, j):
tauc[i, j] = 0
elif mask.ice_free(i, j):
tauc[i, j] = high_tauc
# Main code starts here
if __name__ == "__main__":
context = PISM.Context()
config = context.config
com = context.com
PISM.set_abort_on_sigint(True)
append_mode = False
input_filename = config.get_string("input.file")
inv_data_filename = PISM.OptionString("-inv_data", "inverse data file", input_filename).value()
use_tauc_prior = PISM.OptionBool("-inv_use_tauc_prior",
"Use tauc_prior from inverse data file as initial guess.")
ssarun = PISM.invert.ssa.SSAForwardRunFromInputFile(input_filename, inv_data_filename, 'tauc')
ssarun.setup()
vecs = ssarun.modeldata.vecs
grid = ssarun.grid
# Determine the prior guess for tauc. This can be one of
# a) tauc from the input file (default)
# b) tauc_prior from the inv_datafile if -use_tauc_prior is set
tauc_prior = PISM.model.createYieldStressVec(grid, 'tauc_prior')
tauc_prior.set_attrs("diagnostic",
"initial guess for (pseudo-plastic) basal yield stress in an inversion",
"Pa", "Pa", "", 0)
tauc = PISM.model.createYieldStressVec(grid)
if use_tauc_prior:
tauc_prior.regrid(inv_data_filename, critical=True)
else:
if not PISM.util.fileHasVariable(input_filename, "tauc"):
PISM.verbPrintf(
1, com, "Initial guess for tauc is not available as 'tauc' in %s.\nYou can provide an initial guess as 'tauc_prior' using the command line option -use_tauc_prior." % input_filename)
exit(1)
tauc.regrid(input_filename, True)
tauc_prior.copy_from(tauc)
adjustTauc(vecs.ice_mask, tauc_prior)
# Convert tauc_prior -> zeta_prior
zeta = PISM.IceModelVec2S()
WIDE_STENCIL = int(grid.ctx().config().get_number("grid.max_stencil_width"))
zeta.create(grid, "", PISM.WITH_GHOSTS, WIDE_STENCIL)
ssarun.tauc_param.convertFromDesignVariable(tauc_prior, zeta)
ssarun.ssa.linearize_at(zeta)
vel_ssa_observed = None
vel_ssa_observed = PISM.model.create2dVelocityVec(grid, '_ssa_observed', stencil_width=2)
if PISM.util.fileHasVariable(inv_data_filename, "u_ssa_observed"):
vel_ssa_observed.regrid(inv_data_filename, True)
else:
if not PISM.util.fileHasVariable(inv_data_filename, "u_surface_observed"):
PISM.verbPrintf(
1, context.com, "Neither u/v_ssa_observed nor u/v_surface_observed is available in %s.\nAt least one must be specified.\n" % inv_data_filename)
exit(1)
vel_surface_observed = PISM.model.create2dVelocityVec(grid, '_surface_observed', stencil_width=2)
vel_surface_observed.regrid(inv_data_filename, True)
        sia_solver = PISM.SIAFD
        # 'is_regional' is not defined elsewhere in this script; assume it comes
        # from a command-line flag (assumed option name "-regional").
        is_regional = PISM.OptionBool("-regional", "compute SIA velocities using regional model semantics")
        if is_regional:
            sia_solver = PISM.SIAFD_Regional
        vel_sia_observed = PISM.sia.computeSIASurfaceVelocities(ssarun.modeldata, sia_solver)
vel_sia_observed.metadata(0).set_name('u_sia_observed')
vel_sia_observed.metadata(0).set_string('long_name', "x-component of the 'observed' SIA velocities")
vel_sia_observed.metadata(1).set_name('v_sia_observed')
vel_sia_observed.metadata(1).set_string('long_name', "y-component of the 'observed' SIA velocities")
vel_ssa_observed.copy_from(vel_surface_observed)
vel_ssa_observed.add(-1, vel_sia_observed)
(designFunctional, stateFunctional) = PISM.invert.ssa.createTikhonovFunctionals(ssarun)
eta = config.get_number("inverse.tikhonov.penalty_weight")
solver_gn = PISM.InvSSATikhonovGN(ssarun.ssa, zeta, vel_ssa_observed, eta, designFunctional, stateFunctional)
seed = PISM.OptionInteger("-inv_seed", "random generator seed")
if seed.is_set():
np.random.seed(seed.value() + PISM.Context().rank)
d1 = PISM.vec.randVectorS(grid, 1)
d2 = PISM.vec.randVectorS(grid, 1)
GNd1 = PISM.IceModelVec2S()
GNd1.create(grid, "", PISM.WITHOUT_GHOSTS)
GNd2 = PISM.IceModelVec2S()
GNd2.create(grid, "", PISM.WITHOUT_GHOSTS)
solver_gn.apply_GN(d1, GNd1)
solver_gn.apply_GN(d2, GNd2)
ip1 = d1.get_vec().dot(GNd2.get_vec())
ip2 = d2.get_vec().dot(GNd1.get_vec())
PISM.verbPrintf(1, grid.com, "Test of Gauss-Newton symmetry (x^t GN y) vs (y^t GN x)\n")
PISM.verbPrintf(1, grid.com, "ip1 %.10g ip2 %.10g\n" % (ip1, ip2))
PISM.verbPrintf(1, grid.com, "relative error %.10g\n" % abs((ip1 - ip2) / ip1))
| gpl-3.0 |
paramecio/pastafari | scripts/monit/debian_wheezy/alive.py | 1 | 4720 | #!/usr/bin/python3 -u
# A script for install alive script
import subprocess
import argparse
import re
import os
import shutil
import pwd
from subprocess import call
parser = argparse.ArgumentParser(description='A script to install the alive script and cron')
parser.add_argument('--url', help='The url where notify that this server is alive', required=True)
parser.add_argument('--user', help='The user for pastafari', required=True)
parser.add_argument('--pub_key', help='The pub key used in pastafari user', required=True)
args = parser.parse_args()
url=args.url
check_url = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
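# For reference, this pattern accepts values such as
# "https://panel.example.com/alive/<token>/<ip>" (a hypothetical URL;
# any http(s)/ftp(s) URL with a host name or IP address passes the check).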
if check_url.match(args.url):
# Create users
if call("sudo useradd -m -s /bin/sh %s" % args.user, shell=True) > 0:
print('Error, cannot add a new user')
exit(1)
else:
print('Added user')
if call("sudo mkdir -p /home/"+args.user+"/.ssh && sudo chown "+args.user+":"+args.user+" /home/"+args.user+"/.ssh && sudo chmod 700 /home/"+args.user+"/.ssh", shell=True) > 0:
print('Error, cannot add ssh directory')
exit(1)
else:
print('Added ssh directory')
if call("sudo cp "+args.pub_key+" /home/"+args.user+"/.ssh/authorized_keys && sudo chown "+args.user+":"+args.user+" /home/"+args.user+"/.ssh/authorized_keys && sudo chmod 600 /home/"+args.user+"/.ssh/authorized_keys", shell=True) > 0:
print('Error, cannot pub key to user')
exit(1)
else:
print('Added pub key to user')
# Edit alive cron
with open('modules/pastafari/scripts/monit/debian_wheezy/files/crontab/alive') as f:
alive_cron=f.read()
with open('modules/pastafari/scripts/monit/debian_wheezy/files/crontab/alive', 'w') as f:
alive_cron=alive_cron.replace('/home/spanel/modules/pastafari/scripts/monit/debian_wheezy/files/get_info.py', '/usr/local/bin/get_info.py')
f.write(alive_cron)
# Edit get_info.py
with open('modules/pastafari/scripts/monit/debian_wheezy/files/get_info.py') as f:
get_info=f.read()
with open('/usr/local/bin/get_info.py', 'w') as f:
get_info=get_info.replace("http://url/to/server/token/ip", args.url)
f.write(get_info)
os.chmod('/usr/local/bin/get_info.py', 0o700)
user_passwd=pwd.getpwnam(args.user)
os.chown('/usr/local/bin/get_info.py', user_passwd[2], user_passwd[3])
#shutil.chown('/usr/local/bin/get_info.py', args.user, args.user)
# Edit get_updates.py
with open('modules/pastafari/scripts/monit/debian_wheezy/files/get_updates.py') as f:
get_updates=f.read()
with open('/etc/cron.daily/get_updates.py', 'w') as f:
url_updates=args.url.replace('/getinfo/', '/getupdates/')
get_updates=get_updates.replace("http://url/to/server/token/ip", url_updates)
f.write(get_updates)
os.chmod('/etc/cron.daily/get_updates.py', 0o700)
# Edit sudo file
with open('modules/pastafari/scripts/monit/debian_wheezy/files/sudoers.d/spanel') as f:
sudoers=f.read()
with open('/etc/sudoers.d/spanel', 'w') as f:
sudoers=sudoers.replace("spanel", args.user)
f.write(sudoers)
# Copy cron alive to /etc/cron.d/
if call("sudo cp modules/pastafari/scripts/monit/debian_wheezy/files/crontab/alive /etc/cron.d/alive", shell=True) > 0:
print('Error, cannot install crontab alive file in cron.d')
exit(1)
else:
        print('Added crontab alive file in cron.d')
print('Script installed successfully')
    # Copy the upgrade script to /home/<user>/bin/
if call("mkdir /home/"+args.user+"/bin/ && cp modules/pastafari/scripts/standard/debian_wheezy/upgrade.sh /home/"+args.user+"/bin/ && chown -R "+args.user+":"+args.user+" /home/"+args.user+"/bin/", shell=True) > 0:
        print('Error, cannot install upgrade.sh in /home/'+args.user+'/bin/')
exit(1)
else:
        print('Added /home/'+args.user+'/bin/upgrade.sh')
print('Script installed successfully')
# Making first call to site
if subprocess.call('/usr/local/bin/get_info.py', shell=True) > 0:
print('Error')
exit(1)
else:
print('Your server should be up in your panel...')
exit(0)
else:
print('Error installing the module, not valid url')
exit(1)
| gpl-2.0 |
broferek/ansible | test/units/module_utils/test_database.py | 75 | 4393 | import pytest
from ansible.module_utils.database import (
pg_quote_identifier,
SQLParseError,
)
# These are all valid strings
# The results are based on interpreting the identifier as a table name
VALID = {
# User quoted
'"public.table"': '"public.table"',
'"public"."table"': '"public"."table"',
'"schema test"."table test"': '"schema test"."table test"',
# We quote part
'public.table': '"public"."table"',
'"public".table': '"public"."table"',
'public."table"': '"public"."table"',
'schema test.table test': '"schema test"."table test"',
'"schema test".table test': '"schema test"."table test"',
'schema test."table test"': '"schema test"."table test"',
# Embedded double quotes
'table "test"': '"table ""test"""',
'public."table ""test"""': '"public"."table ""test"""',
'public.table "test"': '"public"."table ""test"""',
'schema "test".table': '"schema ""test"""."table"',
'"schema ""test""".table': '"schema ""test"""."table"',
'"""wat"""."""test"""': '"""wat"""."""test"""',
# Sigh, handle these as well:
'"no end quote': '"""no end quote"',
'schema."table': '"schema"."""table"',
'"schema.table': '"""schema"."table"',
'schema."table.something': '"schema"."""table"."something"',
# Embedded dots
'"schema.test"."table.test"': '"schema.test"."table.test"',
'"schema.".table': '"schema."."table"',
'"schema."."table"': '"schema."."table"',
'schema.".table"': '"schema".".table"',
'"schema".".table"': '"schema".".table"',
'"schema.".".table"': '"schema.".".table"',
# These are valid but maybe not what the user intended
'."table"': '".""table"""',
'table.': '"table."',
}
INVALID = {
('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots',
('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots',
('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots',
('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots",
('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots",
('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots",
('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots",
('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema."table"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot',
}
HOW_MANY_DOTS = (
('role', 'role', '"role"',
'PostgreSQL does not support role with more than 1 dots'),
('db', 'database', '"db"',
'PostgreSQL does not support database with more than 1 dots'),
('db.schema', 'schema', '"db"."schema"',
'PostgreSQL does not support schema with more than 2 dots'),
('db.schema.table', 'table', '"db"."schema"."table"',
'PostgreSQL does not support table with more than 3 dots'),
('db.schema.table.column', 'column', '"db"."schema"."table"."column"',
'PostgreSQL does not support column with more than 4 dots'),
)
VALID_QUOTES = ((test, VALID[test]) for test in sorted(VALID))
INVALID_QUOTES = ((test[0], test[1], INVALID[test]) for test in sorted(INVALID))
@pytest.mark.parametrize("identifier, quoted_identifier", VALID_QUOTES)
def test_valid_quotes(identifier, quoted_identifier):
assert pg_quote_identifier(identifier, 'table') == quoted_identifier
@pytest.mark.parametrize("identifier, id_type, msg", INVALID_QUOTES)
def test_invalid_quotes(identifier, id_type, msg):
with pytest.raises(SQLParseError) as ex:
pg_quote_identifier(identifier, id_type)
ex.match(msg)
@pytest.mark.parametrize("identifier, id_type, quoted_identifier, msg", HOW_MANY_DOTS)
def test_how_many_dots(identifier, id_type, quoted_identifier, msg):
assert pg_quote_identifier(identifier, id_type) == quoted_identifier
with pytest.raises(SQLParseError) as ex:
pg_quote_identifier('%s.more' % identifier, id_type)
ex.match(msg)
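# For reference, direct calls mirroring the fixtures above (expected values taken
# from the VALID mapping):
#   pg_quote_identifier('public.table', 'table')          # -> '"public"."table"'
#   pg_quote_identifier('schema "test".table', 'table')   # -> '"schema ""test"""."table"'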
| gpl-3.0 |
macborowy/dajsiepoznac-feed | DajSiePoznacFeed-Server/tests/workers_tests/test_participant_worker.py | 1 | 2277 | import unittest
import mock
from crawler.src.controllers.worker import ParticipantsWorker
import webapp2
import webtest
from google.appengine.ext import testbed
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class ParticipantsWorkerTests(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_taskqueue_stub()
self.taskqueue_stub = self.testbed.get_stub(
testbed.TASKQUEUE_SERVICE_NAME)
def tearDown(self):
self.taskqueue_stub.Shutdown()
self.testbed.deactivate()
def test_participants_should_enqueue_GetAllParticipantsTask(self):
worker = ParticipantsWorker()
worker._enqueue()
tasks = self.taskqueue_stub.get_filtered_tasks()
self.assertTrue(len(tasks) == 1)
@mock.patch("crawler.src.scrapper.blogs.getAllParticipants")
def test_GetAllParticipantsTask_should_spawn_multiple_child_jobs(self, mock_method):
mock_method.return_value = [
{"blog_url": "http://mborowy.com", "author": "Maciej Borowy"},
{"blog_url": "http://google.com", "author": "Google"}
]
parent_task = ParticipantsWorker.GetAllParticipantsTask()
parent_task._start(None)
tasks = self.taskqueue_stub.get_filtered_tasks()
self.assertTrue(len(tasks) == 2)
class GetAllParticipantsTaskTest(unittest.TestCase):
@mock.patch("crawler.src.scrapper.blogs.getAllParticipants")
def test_task_runs_getAllParticipants_method_from_scrapper_module(self, mock_method):
task = ParticipantsWorker.GetAllParticipantsTask()
task._task(None)
self.assertTrue(mock_method.called == 1)
class GetFeedUrlTaskTests(unittest.TestCase):
def setUp(self):
app = webapp2.WSGIApplication([("/", ParticipantsWorker.GetFeedUrlTask)])
self.test_app = webtest.TestApp(app)
@mock.patch("crawler.src.scrapper.blogs.getFeedUrl")
def test_blogs_getFeedUrl_is_called_with_params_from_request(self, mock_getFeedUrl):
params = {"blog_url": "http://mborowy.com", "author": "Maciej Borowy"}
self.test_app.post("/", params=params)
mock_getFeedUrl.assert_called_with(participant=params)
| mit |
NGSchool2016/ngschool2016-materials | src/snpEff/scripts/gsa/checkGeneNames.py | 2 | 4985 | #!/usr/bin/env python
import sys
# Debug mode?
debug = False
#------------------------------------------------------------------------------
# Read genes file
#------------------------------------------------------------------------------
def readGenes(genesFile):
print >> sys.stderr, "Reading file " + genesFile
genes2new = {}
genes2old = {}
id2nameNew = {}
id2nameOld = {}
for line in open(genesFile) :
fields = line.rstrip().split("\t")
if debug: print fields
geneId, nameOld = fields[0], fields[1]
nameNew = ''
if len(fields) > 2: nameNew = fields[2]
if nameNew:
genes2new[nameOld] = nameNew
id2nameNew[id] = nameNew
if nameOld:
genes2old[nameNew] = nameOld
id2nameOld[id] = nameOld
return genes2new, genes2old, id2nameNew, id2nameOld
#------------------------------------------------------------------------------
# Read HGNC file: gene names, previous names and synonyms.
#------------------------------------------------------------------------------
def readHgcn(hgncFile):
print >> sys.stderr, "Reading file " + hgncFile
genesHgcn = {}
for line in open(hgncFile) :
fields = line.rstrip().split("\t")
if len(fields) < 8: continue
geneName, prevName, synonyms = fields[1], fields[6], fields[8]
if debug: print "{}\t|{}|\t|{}|".format(geneName, prevName, synonyms)
# Add all 'previous names'
for g in prevName.split(",") :
alias = g.strip()
if alias:
if alias in genesHgcn:
print >> sys.stderr, "Error: Alias '{}' already exists ( {} vs {} )!".format( alias, genesHgcn[alias], geneName )
else :
genesHgcn[alias] = geneName
if debug: print "\tPrev: |{}|".format( alias )
# Add all 'synonyms'
for g in synonyms.split(",") :
alias = g.strip()
if alias:
if alias in genesHgcn:
print >> sys.stderr, "Error: Alias '{}' already exists ( {} vs {} )!".format( alias, genesHgcn[alias], geneName )
else :
genesHgcn[alias] = geneName
if debug: print "\tSyn: |{}|".format( alias )
return genesHgcn
#------------------------------------------------------------------------------
# Find gene
#------------------------------------------------------------------------------
#def findGeneName(g, genes2new, genes2old, genesHgcn):
def findGeneName(g):
# Gene name found, no need to find a new name
if isValid(g, genes2new): return g
# Try translating the name using 'genes2old' dictionary
geneOld = genes2old.get(g, "")
if isValid(geneOld, genes2new): return geneOld
# Try an alias
geneHgcn = genesHgcn.get(g, "")
if isValid(geneHgcn, genes2new): return geneHgcn
# We have an alias, but it was not valid.
if geneHgcn:
# Try to find an 'old' name for the alias
geneNew = genes2old.get(geneHgcn, "")
if isValid(geneNew, genes2new): return geneNew
# Desperate attempt: Find a gene that matches
for gn in genes2new:
if gn.startswith(g): return gn
for gn in genes2old:
if gn.startswith(g): return genes2old[gn]
return ""
# Valid gene name (not empty and is in 'genes' dictionary)
def isValid(gname, genes):
if gname and (gname in genes): return True
return False
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
#---
# Parse command line
#---
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: " + sys.argv[0] + " hgnc_complete_set.txt genes.list"
sys.exit(1)
hgncFile = sys.argv[1] # This argument is an HGNC file. Note: You can download the latest version from ftp://ftp.ebi.ac.uk/pub/databases/genenames/hgnc_complete_set.txt.gz
genesFile = sys.argv[2] # This is a "geneId \t geneName" list created from a GTF file
# Read files
genes2new, genes2old, id2nameNew, id2nameOld = readGenes(genesFile)
genesHgcn = readHgcn(hgncFile)
#---
# Read all lines from STDIN
# Note: This is counter intuitive because we are trying to
# replace 'new' names with 'old' names (and not the
# other way arround which is what you'd expect)
#---
for line in sys.stdin:
f = line.rstrip().split('\t')
geneSet = f[0]
genesNames = f[2:]
# Check that each gene has a valid geneID
missing = ""
missingCount = 0
foundAlias = 0
out = "{}\t{}".format(geneSet, f[1]);
for g in genesNames :
geneOld = findGeneName(g)
if not geneOld:
# No valid replacement found
missing += "\t\t'{}'\n".format(g)
missingCount += 1
elif g != geneOld:
# Replacement found
missingCount += 1
foundAlias += 1
missing += "\t\t'{}'\t->\t'{}'\n".format(g, geneOld)
# Add only if there is a gene name (skip if no replacement has been found)
if geneOld : out += "\t" + geneOld
# Show line (names have been replaced)
print out
if missingCount > 0 :
total = (len(f) - 2)
missingPerc = 100.0 * missingCount / total
print >> sys.stderr, "{}\n\tMissing : {} ( {:.1f}% )\n\tTotal : {}\n\tReplaced: {}\n\tGenes ( -> Replacement ) :\n{}".format(geneSet, missingCount, missingPerc, total, foundAlias, missing)
| gpl-3.0 |
slohse/ansible | lib/ansible/modules/cloud/azure/azure_rm_appgateway.py | 4 | 35537 | #!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_appgateway
version_added: "2.7"
short_description: Manage Application Gateway instance.
description:
- Create, update and delete instance of Application Gateway.
options:
resource_group:
description:
- The name of the resource group.
required: True
name:
description:
- The name of the application gateway.
required: True
location:
description:
- Resource location. If not set, location from the resource group will be used as default.
sku:
description:
- SKU of the application gateway resource.
suboptions:
name:
description:
- Name of an application gateway SKU.
choices:
- 'standard_small'
- 'standard_medium'
- 'standard_large'
- 'waf_medium'
- 'waf_large'
tier:
description:
- Tier of an application gateway.
choices:
- 'standard'
- 'waf'
capacity:
description:
- Capacity (instance count) of an application gateway.
ssl_policy:
description:
- SSL policy of the application gateway resource.
suboptions:
disabled_ssl_protocols:
description:
- List of SSL protocols to be disabled on application gateway.
choices:
- 'tls_v1_0'
- 'tls_v1_1'
- 'tls_v1_2'
policy_type:
description:
- Type of SSL Policy.
choices:
- 'predefined'
- 'custom'
policy_name:
description:
- Name of Ssl C(predefined) policy.
choices:
- 'ssl_policy20150501'
- 'ssl_policy20170401'
- 'ssl_policy20170401_s'
cipher_suites:
description:
- List of SSL cipher suites to be enabled in the specified order to application gateway.
choices:
- tls_ecdhe_rsa_with_aes_256_gcm_sha384
- tls_ecdhe_rsa_with_aes_128_gcm_sha256
- tls_ecdhe_rsa_with_aes_256_cbc_sha384
- tls_ecdhe_rsa_with_aes_128_cbc_sha256
- tls_ecdhe_rsa_with_aes_256_cbc_sha
- tls_ecdhe_rsa_with_aes_128_cbc_sha
- tls_dhe_rsa_with_aes_256_gcm_sha384
- tls_dhe_rsa_with_aes_128_gcm_sha256
- tls_dhe_rsa_with_aes_256_cbc_sha
- tls_dhe_rsa_with_aes_128_cbc_sha
- tls_rsa_with_aes_256_gcm_sha384
- tls_rsa_with_aes_128_gcm_sha256
- tls_rsa_with_aes_256_cbc_sha256
- tls_rsa_with_aes_128_cbc_sha256
- tls_rsa_with_aes_256_cbc_sha
- tls_rsa_with_aes_128_cbc_sha
- tls_ecdhe_ecdsa_with_aes_256_gcm_sha384
- tls_ecdhe_ecdsa_with_aes_128_gcm_sha256
- tls_ecdhe_ecdsa_with_aes_256_cbc_sha384
- tls_ecdhe_ecdsa_with_aes_128_cbc_sha256
- tls_ecdhe_ecdsa_with_aes_256_cbc_sha
- tls_ecdhe_ecdsa_with_aes_128_cbc_sha
- tls_dhe_dss_with_aes_256_cbc_sha256
- tls_dhe_dss_with_aes_128_cbc_sha256
- tls_dhe_dss_with_aes_256_cbc_sha
- tls_dhe_dss_with_aes_128_cbc_sha
- tls_rsa_with_3des_ede_cbc_sha
- tls_dhe_dss_with_3des_ede_cbc_sha
min_protocol_version:
description:
- Minimum version of Ssl protocol to be supported on application gateway.
choices:
- 'tls_v1_0'
- 'tls_v1_1'
- 'tls_v1_2'
gateway_ip_configurations:
description:
- List of subnets used by the application gateway.
suboptions:
subnet:
description:
- Reference of the subnet resource. A subnet from where application gateway gets its private address.
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
authentication_certificates:
description:
- Authentication certificates of the application gateway resource.
suboptions:
data:
description:
- Certificate public data - base64 encoded pfx
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
ssl_certificates:
description:
- SSL certificates of the application gateway resource.
suboptions:
data:
description:
- Base-64 encoded pfx certificate.
password:
description:
- Password for the pfx file specified in I(data).
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
frontend_ip_configurations:
description:
- Frontend IP addresses of the application gateway resource.
suboptions:
private_ip_address:
description:
- PrivateIPAddress of the network interface IP Configuration.
private_ip_allocation_method:
description:
- PrivateIP allocation method.
choices:
- 'static'
- 'dynamic'
subnet:
description:
- Reference of the subnet resource.
public_ip_address:
description:
- Reference of the PublicIP resource.
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
frontend_ports:
description:
- List of frontend ports of the application gateway resource.
suboptions:
port:
description:
- Frontend port
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
backend_address_pools:
description:
- List of backend address pool of the application gateway resource.
suboptions:
backend_addresses:
description:
- List of backend addresses
suboptions:
fqdn:
description:
- Fully qualified domain name (FQDN).
ip_address:
description:
- IP address
name:
description:
- Resource that is unique within a resource group. This name can be used to access the resource.
backend_http_settings_collection:
description:
- Backend http settings of the application gateway resource.
suboptions:
port:
description:
- Port
protocol:
description:
- Protocol.
choices:
- 'http'
- 'https'
cookie_based_affinity:
description:
- Cookie based affinity.
choices:
- 'enabled'
- 'disabled'
request_timeout:
description:
- "Request timeout in seconds. Application Gateway will fail the request if response is not received within RequestTimeout. Acceptable va
lues are from 1 second to 86400 seconds."
authentication_certificates:
description:
- List of references to application gateway authentication certificates.
suboptions:
id:
description:
- Resource ID.
host_name:
description:
- Host header to be sent to the backend servers.
pick_host_name_from_backend_address:
description:
                    - Whether the host header should be picked from the host name of the backend server. Default value is false.
affinity_cookie_name:
description:
- Cookie name to use for the affinity cookie.
path:
description:
- Path which should be used as a prefix for all C(http) requests. Null means no path will be prefixed. Default value is null.
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
http_listeners:
description:
- List of HTTP listeners of the application gateway resource.
suboptions:
frontend_ip_configuration:
description:
- Frontend IP configuration resource of an application gateway.
frontend_port:
description:
- Frontend port resource of an application gateway.
protocol:
description:
- Protocol.
choices:
- 'http'
- 'https'
host_name:
description:
- Host name of C(http) listener.
ssl_certificate:
description:
- SSL certificate resource of an application gateway.
require_server_name_indication:
description:
- Applicable only if I(protocol) is C(https). Enables SNI for multi-hosting.
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
request_routing_rules:
description:
- List of request routing rules of the application gateway resource.
suboptions:
rule_type:
description:
- Rule I(type).
choices:
- 'basic'
- 'path_based_routing'
backend_address_pool:
description:
- Backend address pool resource of the application gateway.
backend_http_settings:
description:
- Frontend port resource of the application gateway.
http_listener:
description:
- Http listener resource of the application gateway.
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
state:
description:
            - Assert the state of the application gateway. Use 'present' to create or update and
              'absent' to delete.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create instance of Application Gateway
azure_rm_appgateway:
resource_group: myresourcegroup
name: myappgateway
sku:
name: standard_small
tier: standard
capacity: 2
gateway_ip_configurations:
- subnet:
id: "{{ subnet_id }}"
name: app_gateway_ip_config
frontend_ip_configurations:
- subnet:
id: "{{ subnet_id }}"
name: sample_gateway_frontend_ip_config
frontend_ports:
- port: 90
name: ag_frontend_port
backend_address_pools:
- backend_addresses:
- ip_address: 10.0.0.4
name: test_backend_address_pool
backend_http_settings_collection:
- port: 80
protocol: http
cookie_based_affinity: enabled
name: sample_appgateway_http_settings
http_listeners:
- frontend_ip_configuration: sample_gateway_frontend_ip_config
frontend_port: ag_frontend_port
name: sample_http_listener
request_routing_rules:
- rule_type: Basic
backend_address_pool: test_backend_address_pool
backend_http_settings: sample_appgateway_http_settings
http_listener: sample_http_listener
name: rule1
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: id
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from copy import deepcopy
from ansible.module_utils.network.common.utils import dict_merge
from ansible.module_utils.common.dict_transformations import (
camel_dict_to_snake_dict, snake_dict_to_camel_dict,
_camel_to_snake, _snake_to_camel,
)
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.network import NetworkManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
ssl_policy_spec = dict(
disabled_ssl_protocols=dict(type='list'),
policy_type=dict(type='str', choices=['predefined', 'custom']),
policy_name=dict(type='str', choices=['ssl_policy20150501', 'ssl_policy20170401', 'ssl_policy20170401_s']),
cipher_suites=dict(type='list'),
min_protocol_version=dict(type='str', choices=['tls_v1_0', 'tls_v1_1', 'tls_v1_2'])
)
class AzureRMApplicationGateways(AzureRMModuleBase):
"""Configuration class for an Azure RM Application Gateway resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
location=dict(
type='str'
),
sku=dict(
type='dict'
),
ssl_policy=dict(
type='dict',
options=ssl_policy_spec
),
gateway_ip_configurations=dict(
type='list'
),
authentication_certificates=dict(
type='list'
),
ssl_certificates=dict(
type='list'
),
frontend_ip_configurations=dict(
type='list'
),
frontend_ports=dict(
type='list'
),
backend_address_pools=dict(
type='list'
),
backend_http_settings_collection=dict(
type='list'
),
http_listeners=dict(
type='list'
),
request_routing_rules=dict(
type='list'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.name = None
self.parameters = dict()
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
super(AzureRMApplicationGateways, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
if key == "id":
self.parameters["id"] = kwargs[key]
elif key == "location":
self.parameters["location"] = kwargs[key]
elif key == "sku":
ev = kwargs[key]
if 'name' in ev:
if ev['name'] == 'standard_small':
ev['name'] = 'Standard_Small'
elif ev['name'] == 'standard_medium':
ev['name'] = 'Standard_Medium'
elif ev['name'] == 'standard_large':
ev['name'] = 'Standard_Large'
elif ev['name'] == 'waf_medium':
ev['name'] = 'WAF_Medium'
elif ev['name'] == 'waf_large':
ev['name'] = 'WAF_Large'
if 'tier' in ev:
if ev['tier'] == 'standard':
ev['tier'] = 'Standard'
elif ev['tier'] == 'waf':
ev['tier'] = 'WAF'
self.parameters["sku"] = ev
elif key == "ssl_policy":
ev = kwargs[key]
if 'policy_type' in ev:
ev['policy_type'] = _snake_to_camel(ev['policy_type'], True)
if 'policy_name' in ev:
if ev['policy_name'] == 'ssl_policy20150501':
ev['policy_name'] = 'AppGwSslPolicy20150501'
elif ev['policy_name'] == 'ssl_policy20170401':
ev['policy_name'] = 'AppGwSslPolicy20170401'
elif ev['policy_name'] == 'ssl_policy20170401_s':
ev['policy_name'] = 'AppGwSslPolicy20170401S'
if 'min_protocol_version' in ev:
if ev['min_protocol_version'] == 'tls_v1_0':
ev['min_protocol_version'] = 'TLSv1_0'
elif ev['min_protocol_version'] == 'tls_v1_1':
ev['min_protocol_version'] = 'TLSv1_1'
elif ev['min_protocol_version'] == 'tls_v1_2':
ev['min_protocol_version'] = 'TLSv1_2'
if 'disabled_ssl_protocols' in ev:
protocols = ev['disabled_ssl_protocols']
if protocols is not None:
for i in range(len(protocols)):
if protocols[i] == 'tls_v1_0':
protocols[i] = 'TLSv1_0'
elif protocols[i] == 'tls_v1_1':
protocols[i] = 'TLSv1_1'
elif protocols[i] == 'tls_v1_2':
protocols[i] = 'TLSv1_2'
if 'cipher_suites' in ev:
suites = ev['cipher_suites']
if suites is not None:
for i in range(len(suites)):
suites[i] = suites[i].upper()
elif key == "gateway_ip_configurations":
self.parameters["gateway_ip_configurations"] = kwargs[key]
elif key == "authentication_certificates":
self.parameters["authentication_certificates"] = kwargs[key]
elif key == "ssl_certificates":
self.parameters["ssl_certificates"] = kwargs[key]
elif key == "frontend_ip_configurations":
ev = kwargs[key]
for i in range(len(ev)):
item = ev[i]
if 'private_ip_allocation_method' in item:
item['private_ip_allocation_method'] = _snake_to_camel(item['private_ip_allocation_method'], True)
if 'public_ip_address' in item:
id = public_ip_id(self.subscription_id,
kwargs['resource_group'],
item['public_ip_address'])
item['public_ip_address'] = {'id': id}
self.parameters["frontend_ip_configurations"] = ev
elif key == "frontend_ports":
self.parameters["frontend_ports"] = kwargs[key]
elif key == "backend_address_pools":
self.parameters["backend_address_pools"] = kwargs[key]
elif key == "backend_http_settings_collection":
ev = kwargs[key]
for i in range(len(ev)):
item = ev[i]
if 'protocol' in item:
item['protocol'] = _snake_to_camel(item['protocol'], True)
if 'cookie_based_affinity' in item:
item['cookie_based_affinity'] = _snake_to_camel(item['cookie_based_affinity'], True)
self.parameters["backend_http_settings_collection"] = ev
elif key == "http_listeners":
ev = kwargs[key]
for i in range(len(ev)):
item = ev[i]
if 'frontend_ip_configuration' in item:
id = frontend_ip_configuration_id(self.subscription_id,
kwargs['resource_group'],
kwargs['name'],
item['frontend_ip_configuration'])
item['frontend_ip_configuration'] = {'id': id}
if 'frontend_port' in item:
id = frontend_port_id(self.subscription_id,
kwargs['resource_group'],
kwargs['name'],
item['frontend_port'])
item['frontend_port'] = {'id': id}
if 'ssl_certificate' in item:
id = ssl_certificate_id(self.subscription_id,
kwargs['resource_group'],
kwargs['name'],
item['ssl_certificate'])
item['ssl_certificate'] = {'id': id}
if 'protocol' in item:
item['protocol'] = _snake_to_camel(item['protocol'], True)
ev[i] = item
self.parameters["http_listeners"] = ev
elif key == "request_routing_rules":
ev = kwargs[key]
for i in range(len(ev)):
item = ev[i]
if 'backend_address_pool' in item:
id = backend_address_pool_id(self.subscription_id,
kwargs['resource_group'],
kwargs['name'],
item['backend_address_pool'])
item['backend_address_pool'] = {'id': id}
if 'backend_http_settings' in item:
id = backend_http_settings_id(self.subscription_id,
kwargs['resource_group'],
kwargs['name'],
item['backend_http_settings'])
item['backend_http_settings'] = {'id': id}
if 'http_listener' in item:
id = http_listener_id(self.subscription_id,
kwargs['resource_group'],
kwargs['name'],
item['http_listener'])
item['http_listener'] = {'id': id}
if 'protocol' in item:
item['protocol'] = _snake_to_camel(item['protocol'], True)
if 'rule_type' in item:
item['rule_type'] = _snake_to_camel(item['rule_type'], True)
ev[i] = item
self.parameters["request_routing_rules"] = ev
elif key == "etag":
self.parameters["etag"] = kwargs[key]
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(NetworkManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
if "location" not in self.parameters:
self.parameters["location"] = resource_group.location
old_response = self.get_applicationgateway()
if not old_response:
self.log("Application Gateway instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("Application Gateway instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
self.log("Need to check if Application Gateway instance has to be deleted or may be updated")
self.to_do = Actions.Update
if (self.to_do == Actions.Update):
if (self.parameters['location'] != old_response['location'] or
self.parameters['sku']['name'] != old_response['sku']['name'] or
self.parameters['sku']['tier'] != old_response['sku']['tier'] or
self.parameters['sku']['capacity'] != old_response['sku']['capacity'] or
not compare_arrays(old_response, self.parameters, 'authentication_certificates') or
not compare_arrays(old_response, self.parameters, 'gateway_ip_configurations') or
not compare_arrays(old_response, self.parameters, 'frontend_ip_configurations') or
not compare_arrays(old_response, self.parameters, 'frontend_ports') or
not compare_arrays(old_response, self.parameters, 'backend_address_pools') or
not compare_arrays(old_response, self.parameters, 'backend_http_settings_collection') or
not compare_arrays(old_response, self.parameters, 'request_routing_rules') or
not compare_arrays(old_response, self.parameters, 'http_listeners')):
self.to_do = Actions.Update
else:
self.to_do = Actions.NoAction
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the Application Gateway instance")
if self.check_mode:
self.results['changed'] = True
self.results["parameters"] = self.parameters
return self.results
response = self.create_update_applicationgateway()
if not old_response:
self.results['changed'] = True
else:
self.results['changed'] = old_response.__ne__(response)
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("Application Gateway instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_applicationgateway()
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
# for some time after deletion -- this should be really fixed in Azure
while self.get_applicationgateway():
time.sleep(20)
else:
self.log("Application Gateway instance unchanged")
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
return self.results
def create_update_applicationgateway(self):
'''
Creates or updates Application Gateway with the specified configuration.
:return: deserialized Application Gateway instance state dictionary
'''
self.log("Creating / Updating the Application Gateway instance {0}".format(self.name))
try:
response = self.mgmt_client.application_gateways.create_or_update(resource_group_name=self.resource_group,
application_gateway_name=self.name,
parameters=self.parameters)
if isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the Application Gateway instance.')
self.fail("Error creating the Application Gateway instance: {0}".format(str(exc)))
return response.as_dict()
def delete_applicationgateway(self):
'''
Deletes specified Application Gateway instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the Application Gateway instance {0}".format(self.name))
try:
response = self.mgmt_client.application_gateways.delete(resource_group_name=self.resource_group,
application_gateway_name=self.name)
except CloudError as e:
self.log('Error attempting to delete the Application Gateway instance.')
self.fail("Error deleting the Application Gateway instance: {0}".format(str(e)))
return True
def get_applicationgateway(self):
'''
Gets the properties of the specified Application Gateway.
:return: deserialized Application Gateway instance state dictionary
'''
self.log("Checking if the Application Gateway instance {0} is present".format(self.name))
found = False
try:
response = self.mgmt_client.application_gateways.get(resource_group_name=self.resource_group,
application_gateway_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("Application Gateway instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the Application Gateway instance.')
if found is True:
return response.as_dict()
return False
def public_ip_id(subscription_id, resource_group_name, name):
"""Generate the id for a frontend ip configuration"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/publicIPAddresses/{2}'.format(
subscription_id,
resource_group_name,
name
)
def frontend_ip_configuration_id(subscription_id, resource_group_name, appgateway_name, name):
"""Generate the id for a frontend ip configuration"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/frontendIPConfigurations/{3}'.format(
subscription_id,
resource_group_name,
appgateway_name,
name
)
def frontend_port_id(subscription_id, resource_group_name, appgateway_name, name):
"""Generate the id for a frontend port"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/frontendPorts/{3}'.format(
subscription_id,
resource_group_name,
appgateway_name,
name
)
def ssl_certificate_id(subscription_id, resource_group_name, ssl_certificate_name, name):
"""Generate the id for a frontend port"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/sslCertificates/{3}'.format(
subscription_id,
resource_group_name,
ssl_certificate_name,
name
)
def backend_address_pool_id(subscription_id, resource_group_name, appgateway_name, name):
"""Generate the id for an address pool"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/backendAddressPools/{3}'.format(
subscription_id,
resource_group_name,
appgateway_name,
name
)
def backend_http_settings_id(subscription_id, resource_group_name, appgateway_name, name):
"""Generate the id for a http settings"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/backendHttpSettingsCollection/{3}'.format(
subscription_id,
resource_group_name,
appgateway_name,
name
)
def http_listener_id(subscription_id, resource_group_name, appgateway_name, name):
"""Generate the id for a http listener"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/httpListeners/{3}'.format(
subscription_id,
resource_group_name,
appgateway_name,
name
)
def compare_arrays(old_params, new_params, param_name):
old = old_params.get(param_name) or []
new = new_params.get(param_name) or []
oldd = {}
for item in old:
name = item['name']
oldd[name] = item
newd = {}
for item in new:
name = item['name']
newd[name] = item
newd = dict_merge(oldd, newd)
return newd == oldd
def main():
"""Main execution"""
AzureRMApplicationGateways()
if __name__ == '__main__':
main()
| gpl-3.0 |
Tithen-Firion/youtube-dl | youtube_dl/extractor/ondemandkorea.py | 62 | 2036 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
js_to_json,
)
class OnDemandKoreaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ondemandkorea\.com/(?P<id>[^/]+)\.html'
_GEO_COUNTRIES = ['US', 'CA']
_TEST = {
'url': 'http://www.ondemandkorea.com/ask-us-anything-e43.html',
'info_dict': {
'id': 'ask-us-anything-e43',
'ext': 'mp4',
'title': 'Ask Us Anything : E43',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': 'm3u8 download'
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id, fatal=False)
if not webpage:
# Page sometimes returns captcha page with HTTP 403
raise ExtractorError(
'Unable to access page. You may have been blocked.',
expected=True)
if 'msg_block_01.png' in webpage:
self.raise_geo_restricted(
msg='This content is not available in your region',
countries=self._GEO_COUNTRIES)
if 'This video is only available to ODK PLUS members.' in webpage:
raise ExtractorError(
'This video is only available to ODK PLUS members.',
expected=True)
title = self._og_search_title(webpage)
jw_config = self._parse_json(
self._search_regex(
r'(?s)jwplayer\(([\'"])(?:(?!\1).)+\1\)\.setup\s*\((?P<options>.+?)\);',
webpage, 'jw config', group='options'),
video_id, transform_source=js_to_json)
info = self._parse_jwplayer_data(
jw_config, video_id, require_title=False, m3u8_id='hls',
base_url=url)
info.update({
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
})
return info
| unlicense |
t-neumann/slamdunk | bin/_preamble.py | 1 | 1062 | # Copyright (c) 2015 Tobias Neumann, Philipp Rescheneder.
#
# This file is part of Slamdunk.
#
# Slamdunk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Slamdunk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, os
path = os.path.abspath(sys.argv[0])
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, 'slamdunk', '__init__.py')):
#sys.path.insert(0, os.path.join(path, 'slamdunk'))
sys.path.insert(0, path)
break
path = os.path.dirname(path)
| agpl-3.0 |
jmartinezchaine/OpenERP | openerp/addons/base/test/test_ir_cron.py | 15 | 5020 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
import openerp
JOB = {
'function': u'_0_seconds',
'interval_type': u'minutes',
'user_id': 1,
'name': u'test',
'args': False,
'numbercall': 1,
'nextcall': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'priority': 5,
'doall': True,
'active': True,
'interval_number': 1,
'model': u'ir.cron'
}
class test_ir_cron(openerp.osv.osv.osv):
""" Add a few handy methods to test cron jobs scheduling. """
_inherit = "ir.cron"
def _0_seconds(a, b, c):
print ">>> _0_seconds"
def _20_seconds(self, cr, uid):
print ">>> in _20_seconds"
time.sleep(20)
print ">>> out _20_seconds"
def _80_seconds(self, cr, uid):
print ">>> in _80_seconds"
time.sleep(80)
print ">>> out _80_seconds"
def test_0(self, cr, uid):
now = datetime.now()
t1 = (now + relativedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:%S')
t2 = (now + relativedelta(minutes=1, seconds=5)).strftime('%Y-%m-%d %H:%M:%S')
t3 = (now + relativedelta(minutes=1, seconds=10)).strftime('%Y-%m-%d %H:%M:%S')
self.create(cr, uid, dict(JOB, name='test_0 _20_seconds A', function='_20_seconds', nextcall=t1))
self.create(cr, uid, dict(JOB, name='test_0 _20_seconds B', function='_20_seconds', nextcall=t2))
self.create(cr, uid, dict(JOB, name='test_0 _20_seconds C', function='_20_seconds', nextcall=t3))
def test_1(self, cr, uid):
now = datetime.now()
t1 = (now + relativedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:%S')
self.create(cr, uid, dict(JOB, name='test_1 _20_seconds * 3', function='_20_seconds', nextcall=t1, numbercall=3))
def test_2(self, cr, uid):
now = datetime.now()
t1 = (now + relativedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:%S')
self.create(cr, uid, dict(JOB, name='test_2 _80_seconds * 2', function='_80_seconds', nextcall=t1, numbercall=2))
def test_3(self, cr, uid):
now = datetime.now()
t1 = (now + relativedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:%S')
t2 = (now + relativedelta(minutes=1, seconds=5)).strftime('%Y-%m-%d %H:%M:%S')
t3 = (now + relativedelta(minutes=1, seconds=10)).strftime('%Y-%m-%d %H:%M:%S')
self.create(cr, uid, dict(JOB, name='test_3 _80_seconds A', function='_80_seconds', nextcall=t1))
self.create(cr, uid, dict(JOB, name='test_3 _20_seconds B', function='_20_seconds', nextcall=t2))
self.create(cr, uid, dict(JOB, name='test_3 _20_seconds C', function='_20_seconds', nextcall=t3))
# This test assumes 4 cron threads.
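# The jobs below start roughly 5 seconds apart and each sleeps 20 seconds,
# so A, B and C overlap pairwise; _expect() asserts which other jobs are
# already running when each one starts and which are still running when it ends.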
def test_00(self, cr, uid):
self.test_00_set = set()
now = datetime.now()
t1 = (now + relativedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:%S')
t2 = (now + relativedelta(minutes=1, seconds=5)).strftime('%Y-%m-%d %H:%M:%S')
t3 = (now + relativedelta(minutes=1, seconds=10)).strftime('%Y-%m-%d %H:%M:%S')
self.create(cr, uid, dict(JOB, name='test_00 _20_seconds_A', function='_20_seconds_A', nextcall=t1))
self.create(cr, uid, dict(JOB, name='test_00 _20_seconds_B', function='_20_seconds_B', nextcall=t2))
self.create(cr, uid, dict(JOB, name='test_00 _20_seconds_C', function='_20_seconds_C', nextcall=t3))
def _expect(self, cr, uid, to_add, to_sleep, to_expect_in, to_expect_out):
assert self.test_00_set == to_expect_in
self.test_00_set.add(to_add)
time.sleep(to_sleep)
self.test_00_set.discard(to_add)
assert self.test_00_set == to_expect_out
def _20_seconds_A(self, cr, uid):
self._expect(cr, uid, 'A', 20, set(), set(['B', 'C']))
def _20_seconds_B(self, cr, uid):
self._expect(cr, uid, 'B', 20, set('A'), set('C'))
def _20_seconds_C(self, cr, uid):
self._expect(cr, uid, 'C', 20, set(['A', 'B']), set())
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zooba/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/pip/_vendor/html5lib/_trie/py.py | 1323 | 1775 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
while self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
| apache-2.0 |
rafaeltomesouza/frontend-class1 | aula2/a11/linkedin/client/.gradle/nodejs/node-v7.5.0-darwin-x64/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml_test.py | 2698 | 3270 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
converted = '&lt;test>\'"&#13;&amp;&#10;foo'
converted_apos = converted.replace("'", '&apos;')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
| mit |
pongem/python-bot-project | appengine/standard/botapp/env/lib/python2.7/site-packages/django/db/utils.py | 143 | 10368 | import os
import pkgutil
from importlib import import_module
from threading import local
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils._os import npath, upath
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
DEFAULT_DB_ALIAS = 'default'
DJANGO_VERSION_PICKLE_KEY = '_django_version'
class Error(Exception if six.PY3 else StandardError): # NOQA: StandardError undefined on PY3
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class DatabaseErrorWrapper(object):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
def __init__(self, wrapper):
"""
wrapper is a database wrapper.
It must have a Database attribute defining PEP-249 exceptions.
"""
self.wrapper = wrapper
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
for dj_exc_type in (
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
DatabaseError,
InterfaceError,
Error,
):
db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
if issubclass(exc_type, db_exc_type):
dj_exc_value = dj_exc_type(*exc_value.args)
dj_exc_value.__cause__ = exc_value
if not hasattr(exc_value, '__traceback__'):
exc_value.__traceback__ = traceback
# Only set the 'errors_occurred' flag for errors that may make
# the connection unusable.
if dj_exc_type not in (DataError, IntegrityError):
self.wrapper.errors_occurred = True
six.reraise(dj_exc_type, dj_exc_value, traceback)
def __call__(self, func):
# Note that we are intentionally not using @wraps here for performance
# reasons. Refs #21109.
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
def load_backend(backend_name):
"""
Return a database backend's "base" module given a fully qualified database
backend name, or raise an error if it doesn't exist.
"""
# This backend was renamed in Django 1.9.
if backend_name == 'django.db.backends.postgresql_psycopg2':
backend_name = 'django.db.backends.postgresql'
try:
return import_module('%s.base' % backend_name)
except ImportError as e_user:
# The database backend wasn't found. Display a helpful error message
# listing all possible (built-in) database backends.
backend_dir = os.path.join(os.path.dirname(upath(__file__)), 'backends')
try:
builtin_backends = [
name for _, name, ispkg in pkgutil.iter_modules([npath(backend_dir)])
if ispkg and name not in {'base', 'dummy', 'postgresql_psycopg2'}
]
except EnvironmentError:
builtin_backends = []
if backend_name not in ['django.db.backends.%s' % b for b in
builtin_backends]:
backend_reprs = map(repr, sorted(builtin_backends))
error_msg = ("%r isn't an available database backend.\n"
"Try using 'django.db.backends.XXX', where XXX "
"is one of:\n %s\nError was: %s" %
(backend_name, ", ".join(backend_reprs), e_user))
raise ImproperlyConfigured(error_msg)
else:
# If there's some other error, this must be an error in Django
raise
class ConnectionDoesNotExist(Exception):
pass
class ConnectionHandler(object):
def __init__(self, databases=None):
"""
databases is an optional dictionary of database definitions (structured
like settings.DATABASES).
"""
self._databases = databases
self._connections = local()
@cached_property
def databases(self):
if self._databases is None:
self._databases = settings.DATABASES
if self._databases == {}:
self._databases = {
DEFAULT_DB_ALIAS: {
'ENGINE': 'django.db.backends.dummy',
},
}
if self._databases[DEFAULT_DB_ALIAS] == {}:
self._databases[DEFAULT_DB_ALIAS]['ENGINE'] = 'django.db.backends.dummy'
if DEFAULT_DB_ALIAS not in self._databases:
raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
return self._databases
def ensure_defaults(self, alias):
"""
Puts the defaults into the settings dictionary for a given connection
where no settings is provided.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn.setdefault('ATOMIC_REQUESTS', False)
conn.setdefault('AUTOCOMMIT', True)
conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy'
conn.setdefault('CONN_MAX_AGE', 0)
conn.setdefault('OPTIONS', {})
conn.setdefault('TIME_ZONE', None)
for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
conn.setdefault(setting, '')
def prepare_test_settings(self, alias):
"""
Makes sure the test settings are available in the 'TEST' sub-dictionary.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
test_settings = conn.setdefault('TEST', {})
for key in ['CHARSET', 'COLLATION', 'NAME', 'MIRROR']:
test_settings.setdefault(key, None)
def __getitem__(self, alias):
if hasattr(self._connections, alias):
return getattr(self._connections, alias)
self.ensure_defaults(alias)
self.prepare_test_settings(alias)
db = self.databases[alias]
backend = load_backend(db['ENGINE'])
conn = backend.DatabaseWrapper(db, alias)
setattr(self._connections, alias, conn)
return conn
def __setitem__(self, key, value):
setattr(self._connections, key, value)
def __delitem__(self, key):
delattr(self._connections, key)
def __iter__(self):
return iter(self.databases)
def all(self):
return [self[alias] for alias in self]
def close_all(self):
for alias in self:
try:
connection = getattr(self._connections, alias)
except AttributeError:
continue
connection.close()
class ConnectionRouter(object):
def __init__(self, routers=None):
"""
If routers is not specified, will default to settings.DATABASE_ROUTERS.
"""
self._routers = routers
@cached_property
def routers(self):
if self._routers is None:
self._routers = settings.DATABASE_ROUTERS
routers = []
for r in self._routers:
if isinstance(r, six.string_types):
router = import_string(r)()
else:
router = r
routers.append(router)
return routers
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
instance = hints.get('instance')
if instance is not None and instance._state.db:
return instance._state.db
return DEFAULT_DB_ALIAS
return _route_db
db_for_read = _router_func('db_for_read')
db_for_write = _router_func('db_for_write')
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
method = router.allow_relation
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(obj1, obj2, **hints)
if allow is not None:
return allow
return obj1._state.db == obj2._state.db
def allow_migrate(self, db, app_label, **hints):
for router in self.routers:
try:
method = router.allow_migrate
except AttributeError:
# If the router doesn't have a method, skip to the next one.
continue
allow = method(db, app_label, **hints)
if allow is not None:
return allow
return True
def allow_migrate_model(self, db, model):
return self.allow_migrate(
db,
model._meta.app_label,
model_name=model._meta.model_name,
model=model,
)
def get_migratable_models(self, app_config, db, include_auto_created=False):
"""
Return app models allowed to be synchronized on provided db.
"""
models = app_config.get_models(include_auto_created=include_auto_created)
return [model for model in models if self.allow_migrate_model(db, model)]
| apache-2.0 |
dd00/commandergenius | project/jni/python/src/Demo/scripts/lpwatch.py | 32 | 3200 | #! /usr/bin/env python
# Watch line printer queue(s).
# Intended for BSD 4.3 lpq.
import posix
import sys
import time
import string
DEF_PRINTER = 'psc'
DEF_DELAY = 10
def main():
delay = DEF_DELAY # XXX Use getopt() later
try:
thisuser = posix.environ['LOGNAME']
except:
thisuser = posix.environ['USER']
printers = sys.argv[1:]
if printers:
# Strip '-P' from printer names just in case
# the user specified it...
for i in range(len(printers)):
if printers[i][:2] == '-P':
printers[i] = printers[i][2:]
else:
if posix.environ.has_key('PRINTER'):
printers = [posix.environ['PRINTER']]
else:
printers = [DEF_PRINTER]
#
clearhome = posix.popen('clear', 'r').read()
#
while 1:
text = clearhome
for name in printers:
text = text + makestatus(name, thisuser) + '\n'
print text
time.sleep(delay)
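# makestatus() below parses `lpq -P<printer>` output; a job line it accepts
# looks roughly like (hypothetical values):
#   active  alice  123  report.ps  4096 bytes
# i.e. rank, user, job id, file name(s), size and the literal word 'bytes'.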
def makestatus(name, thisuser):
pipe = posix.popen('lpq -P' + name + ' 2>&1', 'r')
lines = []
users = {}
aheadbytes = 0
aheadjobs = 0
userseen = 0
totalbytes = 0
totaljobs = 0
while 1:
line = pipe.readline()
if not line: break
fields = string.split(line)
n = len(fields)
if len(fields) >= 6 and fields[n-1] == 'bytes':
rank = fields[0]
user = fields[1]
job = fields[2]
files = fields[3:-2]
bytes = eval(fields[n-2])
if user == thisuser:
userseen = 1
elif not userseen:
aheadbytes = aheadbytes + bytes
aheadjobs = aheadjobs + 1
totalbytes = totalbytes + bytes
totaljobs = totaljobs + 1
if users.has_key(user):
ujobs, ubytes = users[user]
else:
ujobs, ubytes = 0, 0
ujobs = ujobs + 1
ubytes = ubytes + bytes
users[user] = ujobs, ubytes
else:
if fields and fields[0] <> 'Rank':
line = string.strip(line)
if line == 'no entries':
line = name + ': idle'
elif line[-22:] == ' is ready and printing':
line = name
lines.append(line)
#
if totaljobs:
line = '%d K' % ((totalbytes+1023)//1024)
if totaljobs <> len(users):
line = line + ' (%d jobs)' % totaljobs
if len(users) == 1:
line = line + ' for %s' % (users.keys()[0],)
else:
line = line + ' for %d users' % len(users)
if userseen:
if aheadjobs == 0:
line = line + ' (%s first)' % thisuser
else:
line = line + ' (%d K before %s)' % (
(aheadbytes+1023)//1024, thisuser)
lines.append(line)
#
sts = pipe.close()
if sts:
lines.append('lpq exit status %r' % (sts,))
return string.joinfields(lines, ': ')
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| lgpl-2.1 |
victoredwardocallaghan/xen | tools/python/xen/util/pci.py | 25 | 50406 | #!/usr/bin/env python
#
# PCI Device Information Class
# - Helps obtain information about which I/O resources a PCI device needs
#
# Author: Ryan Wilson <[email protected]>
import sys
import os, os.path
import errno
import resource
import re
import types
import struct
import time
import threading
from xen.util import utils
from xen.xend import uuid
from xen.xend import sxp
from xen.xend.XendConstants import AUTO_PHP_SLOT
from xen.xend.XendSXPDev import dev_dict_to_sxp
from xen.xend.XendLogging import log
# for 2.3 compatibility
try:
set()
except NameError:
from sets import Set as set
PROC_PCI_PATH = '/proc/bus/pci/devices'
PROC_PCI_NUM_RESOURCES = 7
SYSFS_PCI_DEVS_PATH = '/bus/pci/devices'
SYSFS_PCI_DEV_RESOURCE_PATH = '/resource'
SYSFS_PCI_DEV_CONFIG_PATH = '/config'
SYSFS_PCI_DEV_IRQ_PATH = '/irq'
SYSFS_PCI_DEV_DRIVER_DIR_PATH = '/driver'
SYSFS_PCI_DEV_VENDOR_PATH = '/vendor'
SYSFS_PCI_DEV_DEVICE_PATH = '/device'
SYSFS_PCI_DEV_SUBVENDOR_PATH = '/subsystem_vendor'
SYSFS_PCI_DEV_SUBDEVICE_PATH = '/subsystem_device'
SYSFS_PCI_DEV_CLASS_PATH = '/class'
SYSFS_PCIBACK_PATH = '/bus/pci/drivers/pciback/'
SYSFS_PCISTUB_PATH = '/bus/pci/drivers/pci-stub/'
LSPCI_CMD = 'lspci'
PCI_DEV_REG_EXPRESS_STR = r"[0-9a-fA-F]{4}:[0-9a-fA-F]{2}:[0-9a-fA-F]{2}."+ \
r"[0-9a-fA-F]{1}"
DEV_TYPE_PCIe_ENDPOINT = 0
DEV_TYPE_PCIe_BRIDGE = 1
DEV_TYPE_PCI_BRIDGE = 2
DEV_TYPE_PCI = 3
PCI_VENDOR_ID = 0x0
PCI_STATUS = 0x6
PCI_CLASS_DEVICE = 0x0a
PCI_CLASS_BRIDGE_PCI = 0x0604
PCI_HEADER_TYPE = 0x0e
PCI_HEADER_TYPE_MASK = 0x7f
PCI_HEADER_TYPE_NORMAL = 0
PCI_HEADER_TYPE_BRIDGE = 1
PCI_HEADER_TYPE_CARDBUS = 2
PCI_CAPABILITY_LIST = 0x34
PCI_CB_BRIDGE_CONTROL = 0x3e
PCI_BRIDGE_CTL_BUS_RESET= 0x40
PCI_CAP_ID_EXP = 0x10
PCI_EXP_FLAGS = 0x2
PCI_EXP_FLAGS_TYPE = 0x00f0
PCI_EXP_TYPE_DOWNSTREAM = 0x6
PCI_EXP_TYPE_PCI_BRIDGE = 0x7
PCI_EXP_DEVCAP = 0x4
PCI_EXP_DEVCAP_FLR = (0x1 << 28)
PCI_EXP_DEVCTL = 0x8
PCI_EXP_DEVCTL_FLR = (0x1 << 15)
PCI_EXT_CAP_ID_ACS = 0x000d
PCI_EXT_CAP_ACS_ENABLED = 0x1d # The bits V, R, C, U.
PCI_EXT_ACS_CTRL = 0x06
PCI_CAP_ID_PM = 0x01
PCI_PM_CTRL = 4
PCI_PM_CTRL_NO_SOFT_RESET = 0x0008
PCI_PM_CTRL_STATE_MASK = 0x0003
PCI_D3hot = 3
PCI_D0hot = 0
VENDOR_INTEL = 0x8086
PCI_CAP_ID_VENDOR_SPECIFIC_CAP = 0x09
PCI_CLASS_ID_USB = 0x0c03
PCI_USB_FLRCTRL = 0x4
PCI_DEVICE_ID = 0x02
PCI_COMMAND = 0x04
PCI_CLASS_ID_VGA = 0x0300
PCI_DEVICE_ID_IGFX_GM45 = 0x2a42
PCI_DEVICE_ID_IGFX_EAGLELAKE = 0x2e02
PCI_DEVICE_ID_IGFX_Q45 = 0x2e12
PCI_DEVICE_ID_IGFX_G45 = 0x2e22
PCI_DEVICE_ID_IGFX_G41 = 0x2e32
PCI_CAP_IGFX_CAP09_OFFSET = 0xa4
PCI_CAP_IGFX_CAP13_OFFSET = 0xa4
PCI_CAP_IGFX_GDRST = 0X0d
PCI_CAP_IGFX_GDRST_OFFSET = 0xc0
# The VF of Intel 82599 10GbE Controller
# See http://download.intel.com/design/network/datashts/82599_datasheet.pdf
# For 'VF PCIe Configuration Space', see its Table 9.7.
DEVICE_ID_82599 = 0x10ed
PCI_CAP_ID_AF = 0x13
PCI_AF_CAPs = 0x3
PCI_AF_CAPs_TP_FLR = 0x3
PCI_AF_CTL = 0x4
PCI_AF_CTL_FLR = 0x1
PCI_BAR_0 = 0x10
PCI_BAR_5 = 0x24
PCI_BAR_SPACE = 0x01
PCI_BAR_IO = 0x01
PCI_BAR_IO_MASK = ~0x03
PCI_BAR_MEM = 0x00
PCI_BAR_MEM_MASK = ~0x0f
PCI_STATUS_CAP_MASK = 0x10
PCI_STATUS_OFFSET = 0x6
PCI_CAP_OFFSET = 0x34
MSIX_BIR_MASK = 0x7
MSIX_SIZE_MASK = 0x7ff
# Global variable to store information from lspci
lspci_info = None
lspci_info_lock = threading.RLock()
#Calculate PAGE_SHIFT: number of bits to shift an address to get the page number
PAGE_SIZE = resource.getpagesize()
PAGE_SHIFT = 0
t = PAGE_SIZE
while not (t&1):
t>>=1
PAGE_SHIFT+=1
PAGE_MASK=~(PAGE_SIZE - 1)
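# Illustrative values (assuming the common case PAGE_SIZE == 4096):
# PAGE_SHIFT == 12 and PAGE_MASK == ~0xfff, so (addr & PAGE_MASK) clears the
# page offset and (addr >> PAGE_SHIFT) gives the page frame number.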
# Definitions from Linux: include/linux/pci.h
def PCI_DEVFN(slot, func):
return ((((slot) & 0x1f) << 3) | ((func) & 0x07))
def PCI_SLOT(devfn):
return (devfn >> 3) & 0x1f
def PCI_FUNC(devfn):
return devfn & 0x7
def PCI_BDF(domain, bus, slot, func):
return (((domain & 0xffff) << 16) | ((bus & 0xff) << 8) |
PCI_DEVFN(slot, func))
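# Worked example of the encoding above (hypothetical BDF 0000:0a:1f.7):
# PCI_DEVFN(0x1f, 0x7) == 0xff, PCI_SLOT(0xff) == 0x1f, PCI_FUNC(0xff) == 0x7,
# and PCI_BDF(0x0000, 0x0a, 0x1f, 0x7) == 0x0aff.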
def check_pci_opts(opts):
def f((k, v)):
if k not in ['msitranslate', 'power_mgmt'] or \
not v.lower() in ['0', '1', 'yes', 'no']:
raise PciDeviceParseError('Invalid pci option %s=%s: ' % (k, v))
map(f, opts)
def serialise_pci_opts(opts):
return ','.join(map(lambda x: '='.join(x), opts))
def split_pci_opts(opts):
return map(lambda x: x.split('='),
filter(lambda x: x != '', opts.split(',')))
def append_default_pci_opts(opts, defopts):
optsdict = dict(opts)
return opts + filter(lambda (k, v): not optsdict.has_key(k), defopts)
def pci_opts_list_to_sxp(list):
return dev_dict_to_sxp({'opts': list})
def pci_opts_list_from_sxp(dev):
return map(lambda x: sxp.children(x)[0], sxp.children(dev, 'opts'))
def pci_convert_dict_to_sxp(dev, state, sub_state = None):
pci_sxp = ['pci', dev_dict_to_sxp(dev), ['state', state]]
if sub_state != None:
pci_sxp.append(['sub_state', sub_state])
return pci_sxp
def pci_convert_sxp_to_dict(dev_sxp):
"""Convert pci device sxp to dict
@param dev_sxp: device configuration
@type dev_sxp: SXP object (parsed config)
@return: dev_config
@rtype: dictionary
"""
# Parsing the device SXP's. In most cases, the SXP looks
# like this:
#
# [device, [vif, [mac, xx:xx:xx:xx:xx:xx], [ip 1.3.4.5]]]
#
# However, for PCI devices it looks like this:
#
# [device, [pci, [dev, [domain, 0], [bus, 0], [slot, 1], [func, 2]]]
#
# It seems the reasoning for this difference is because
# pciif.py needs all the PCI device configurations at
# the same time when creating the devices.
#
# To further complicate matters, Xen 2.0 configuration format
# uses the following for pci device configuration:
#
# [device, [pci, [domain, 0], [bus, 0], [dev, 1], [func, 2]]]
# For PCI device hotplug support, the SXP of PCI devices is
# extended like this:
#
# [device, [pci, [dev, [domain, 0], [bus, 0], [slot, 1], [func, 2],
# [vdevfn, 0]],
# [state, 'Initialising']]]
#
# 'vdevfn' shows the virtual hotplug slot number which the PCI device
# is inserted in. This is only effective for HVM domains.
#
# state 'Initialising' indicates that the device is being attached,
# while state 'Closing' indicates that the device is being detached.
#
# The Dict looks like this:
#
# { devs: [{domain: 0, bus: 0, slot: 1, func: 2, vdevfn: 0}],
# states: ['Initialising'] }
dev_config = {}
pci_devs = []
for pci_dev in sxp.children(dev_sxp, 'dev'):
pci_dev_info = dict(pci_dev[1:])
if 'opts' in pci_dev_info:
pci_dev_info['opts'] = pci_opts_list_from_sxp(pci_dev)
# If necessary, initialize uuid, key, and vdevfn for each pci device
if not pci_dev_info.has_key('uuid'):
pci_dev_info['uuid'] = uuid.createString()
if not pci_dev_info.has_key('key'):
pci_dev_info['key'] = "%02x:%02x.%x" % \
(int(pci_dev_info['bus'], 16),
int(pci_dev_info['slot'], 16),
int(pci_dev_info['func'], 16))
if not pci_dev_info.has_key('vdevfn'):
pci_dev_info['vdevfn'] = "0x%02x" % AUTO_PHP_SLOT
pci_devs.append(pci_dev_info)
dev_config['devs'] = pci_devs
pci_states = []
for pci_state in sxp.children(dev_sxp, 'state'):
try:
pci_states.append(pci_state[1])
except IndexError:
raise XendError("Error reading state while parsing pci sxp")
dev_config['states'] = pci_states
return dev_config
def parse_hex(val):
try:
if isinstance(val, types.StringTypes):
return int(val, 16)
else:
return val
except ValueError:
return None
AUTO_PHP_FUNC = 1
MANUAL_PHP_FUNC = 2
def parse_pci_pfunc_vfunc(func_str):
list = func_str.split('=')
l = len(list)
if l == 0 or l > 2:
raise PciDeviceParseError('Invalid function: ' + func_str)
p = int(list[0], 16)
if p < 0 or p > 7:
raise PciDeviceParseError('Invalid physical function in: ' + func_str)
if l == 1:
# This defaults to linear mapping of physical to virtual functions
return (p, p, AUTO_PHP_FUNC)
else:
v = int(list[1], 16)
if v < 0 or v > 7:
raise PciDeviceParseError('Invalid virtual function in: ' +
func_str)
return (p, v, MANUAL_PHP_FUNC)
def pci_func_range(start, end):
if end < start:
x = pci_func_range(end, start)
x.reverse()
return x
return range(start, end + 1)
def pci_pfunc_vfunc_range(orig, a, b):
phys = pci_func_range(a[0], b[0])
virt = pci_func_range(a[1], b[1])
if len(phys) != len(virt):
raise PciDeviceParseError('Invalid range in: ' + orig)
return map(lambda x: x + (MANUAL_PHP_FUNC,), zip(phys, virt))
def pci_func_list_map_fn(key, func_str):
if func_str == "*":
return map(lambda x: parse_pci_pfunc_vfunc(x['func']),
filter(lambda x:
pci_dict_cmp(x, key, ['domain', 'bus', 'slot']),
get_all_pci_dict()))
l = map(parse_pci_pfunc_vfunc, func_str.split("-"))
if len(l) == 1:
return l
if len(l) == 2:
return pci_pfunc_vfunc_range(func_str, l[0], l[1])
return []
def pci_func_list_process(pci_dev_str, template, func_str):
l = reduce(lambda x, y: x + y,
(map(lambda x: pci_func_list_map_fn(template, x),
func_str.split(","))))
phys = map(lambda x: x[0], l)
virt = map(lambda x: x[1], l)
if len(phys) != len(set(phys)) or len(virt) != len(set(virt)):
raise PciDeviceParseError("Duplicate functions: %s" % pci_dev_str)
return l
def parse_pci_name_extended(pci_dev_str):
pci_match = re.match(r"((?P<domain>[0-9a-fA-F]{1,4})[:,])?" +
r"(?P<bus>[0-9a-fA-F]{1,2})[:,]" +
r"(?P<slot>[0-9a-fA-F]{1,2})[.,]" +
r"(?P<func>(\*|[0-7]([,-=][0-7])*))" +
r"(@(?P<vdevfn>[01]?[0-9a-fA-F]))?" +
r"(,(?P<opts>.*))?$", pci_dev_str)
if pci_match == None:
raise PciDeviceParseError("Failed to parse pci device: %s" %
pci_dev_str)
pci_dev_info = pci_match.groupdict('')
template = {}
if pci_dev_info['domain'] != '':
domain = int(pci_dev_info['domain'], 16)
else:
domain = 0
template['domain'] = "0x%04x" % domain
template['bus'] = "0x%02x" % int(pci_dev_info['bus'], 16)
template['slot'] = "0x%02x" % int(pci_dev_info['slot'], 16)
template['key'] = pci_dev_str.split(',')[0]
if pci_dev_info['opts'] != '':
template['opts'] = split_pci_opts(pci_dev_info['opts'])
check_pci_opts(template['opts'])
# This is where virtual function assignment takes place
func_list = pci_func_list_process(pci_dev_str, template,
pci_dev_info['func'])
if len(func_list) == 0:
return []
# Set the virtual function of the numerically lowest physical function
# to zero if it has not been manually set
if not filter(lambda x: x[1] == 0, func_list):
auto = filter(lambda x: x[2] == AUTO_PHP_FUNC, func_list)
manual = filter(lambda x: x[2] == MANUAL_PHP_FUNC, func_list)
if not auto:
raise PciDeviceParseError('Virtual device does not include '
'virtual function 0: ' + pci_dev_str)
auto.sort(lambda x,y: cmp(x[1], y[1]))
auto[0] = (auto[0][0], 0, AUTO_PHP_FUNC)
func_list = auto + manual
# For pci attachment and detachment it is important that virtual
# function 0 is done last. This is because it is virtual function 0 that
# is used to signal changes to the guest using ACPI
func_list.sort(lambda x,y: cmp(PCI_FUNC(y[1]), PCI_FUNC(x[1])))
# Virtual slot assignment takes place here if specified in the bdf,
# else it is done inside qemu-xen, as it knows which slots are free
pci = []
for (pfunc, vfunc, auto) in func_list:
pci_dev = template.copy()
pci_dev['func'] = "0x%x" % pfunc
if pci_dev_info['vdevfn'] == '':
vdevfn = AUTO_PHP_SLOT | vfunc
else:
vdevfn = PCI_DEVFN(int(pci_dev_info['vdevfn'], 16), vfunc)
pci_dev['vdevfn'] = "0x%02x" % vdevfn
pci.append(pci_dev)
return pci
def parse_pci_name(pci_name_string):
dev = parse_pci_name_extended(pci_name_string)
if len(dev) != 1:
raise PciDeviceParseError(("Failed to parse pci device: %s: "
"multiple functions specified prohibited") %
pci_name_string)
pci = dev[0]
if not int(pci['vdevfn'], 16) & AUTO_PHP_SLOT:
raise PciDeviceParseError(("Failed to parse pci device: %s: " +
"vdevfn provided where prohibited: 0x%02x") %
(pci_name_string,
PCI_SLOT(int(pci['vdevfn'], 16))))
if 'opts' in pci:
raise PciDeviceParseError(("Failed to parse pci device: %s: " +
"options provided where prohibited: %s") %
(pci_name_string, pci['opts']))
return pci
def __pci_dict_to_fmt_str(fmt, dev):
return fmt % (int(dev['domain'], 16), int(dev['bus'], 16),
int(dev['slot'], 16), int(dev['func'], 16))
def pci_dict_to_bdf_str(dev):
return __pci_dict_to_fmt_str('%04x:%02x:%02x.%01x', dev)
def pci_dict_to_xc_str(dev):
return __pci_dict_to_fmt_str('0x%x, 0x%x, 0x%x, 0x%x', dev)
def pci_dict_cmp(a, b, keys=['domain', 'bus', 'slot', 'func']):
return reduce(lambda x, y: x and y,
map(lambda k: int(a[k], 16) == int(b[k], 16), keys))
def extract_the_exact_pci_names(pci_names):
result = []
if isinstance(pci_names, types.StringTypes):
pci_names = pci_names.split()
elif isinstance(pci_names, types.ListType):
pci_names = re.findall(PCI_DEV_REG_EXPRESS_STR, '%s' % pci_names)
else:
raise PciDeviceParseError('Invalid argument: %s' % pci_names)
for pci in pci_names:
# The length of DDDD:bb:dd.f is 12.
if len(pci) != 12:
continue
if re.match(PCI_DEV_REG_EXPRESS_STR, pci) is None:
continue
result = result + [pci]
return result
def find_sysfs_mnt():
try:
return utils.find_sysfs_mount()
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to locate sysfs mount: %s: %s (%d)'%
(PROC_PCI_PATH, strerr, errno)))
return None
def get_all_pci_names():
if not sys.platform.startswith('linux'): return []
sysfs_mnt = find_sysfs_mnt()
if sysfs_mnt is None:
return None
pci_names = os.popen('ls ' + sysfs_mnt + SYSFS_PCI_DEVS_PATH).read().split()
return pci_names
def get_all_pci_dict():
return map(parse_pci_name, get_all_pci_names())
def get_all_pci_devices():
return map(PciDevice, get_all_pci_dict())
def _create_lspci_info():
"""Execute 'lspci' command and parse the result.
If the command does not exist, lspci_info will be kept blank ({}).
Expects to be protected by lspci_info_lock.
"""
global lspci_info
lspci_info = {}
for paragraph in os.popen(LSPCI_CMD + ' -vmm').read().split('\n\n'):
device_name = None
device_info = {}
# FIXME: workaround for pciutils without the -mm option.
# see: git://git.kernel.org/pub/scm/utils/pciutils/pciutils.git
# commit: 3fd6b4d2e2fda814047664ffc67448ac782a8089
first_device = True
for line in paragraph.split('\n'):
try:
(opt, value) = line.split(':\t')
if opt == 'Slot' or (opt == 'Device' and first_device):
device_name = pci_dict_to_bdf_str(parse_pci_name(value))
first_device = False
else:
device_info[opt] = value
except:
pass
if device_name is not None:
lspci_info[device_name] = device_info
def create_lspci_info():
global lspci_info_lock
lspci_info_lock.acquire()
try:
_create_lspci_info()
finally:
lspci_info_lock.release()
def save_pci_conf_space(devs_string):
pci_list = []
cfg_list = []
sysfs_mnt = find_sysfs_mnt()
for pci_str in devs_string:
pci_path = sysfs_mnt + SYSFS_PCI_DEVS_PATH + '/' + pci_str + \
SYSFS_PCI_DEV_CONFIG_PATH
fd = os.open(pci_path, os.O_RDONLY)
configs = []
for i in range(0, 256, 4):
configs = configs + [os.read(fd,4)]
os.close(fd)
pci_list = pci_list + [pci_path]
cfg_list = cfg_list + [configs]
return (pci_list, cfg_list)
def restore_pci_conf_space(pci_cfg_list):
time.sleep(1.0)
pci_list = pci_cfg_list[0]
cfg_list = pci_cfg_list[1]
for i in range(0, len(pci_list)):
pci_path = pci_list[i]
configs = cfg_list[i]
fd = os.open(pci_path, os.O_WRONLY)
for dw in configs:
os.write(fd, dw)
os.close(fd)
def find_all_assignable_devices():
''' devices owned by pciback or pci-stub can be directly assigned to a
guest with IOMMU (VT-d or AMD IOMMU); find all these devices.
'''
sysfs_mnt = find_sysfs_mnt()
pciback_path = sysfs_mnt + SYSFS_PCIBACK_PATH
pcistub_path = sysfs_mnt + SYSFS_PCISTUB_PATH
pci_names1 = os.popen('ls %s 2>/dev/null' % pciback_path).read()
pci_names2 = os.popen('ls %s 2>/dev/null' % pcistub_path).read()
if len(pci_names1) + len(pci_names2) == 0 :
return None
pci_list = extract_the_exact_pci_names(pci_names1)
pci_list = pci_list + extract_the_exact_pci_names(pci_names2)
dev_list = []
for pci in pci_list:
dev = PciDevice(parse_pci_name(pci))
dev_list = dev_list + [dev]
return dev_list
def transform_list(target, src):
''' src: its element is pci string (Format: xxxx:xx:xx.x).
target: its element is pci string, or a list of pci string.
If all the elements in src are in target, we remove them from target
and add src into target; otherwise, we remove from target all the
elements that also appear in src.
'''
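# Illustrative behaviour (hypothetical elements):
#   transform_list(['a', 'b', 'c'], ['a', 'b']) -> [['a', 'b'], 'c']
#   transform_list(['a', ['x', 'y'], 'c'], ['a', 'b']) -> [['x', 'y'], 'c']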
result = []
target_contains_src = True
for e in src:
if not e in target:
target_contains_src = False
break
if target_contains_src:
result = result + [src]
for e in target:
if not e in src:
result = result + [e]
return result
def check_FLR_capability(dev_list):
if len(dev_list) == 0:
return []
pci_list = []
pci_dev_dict = {}
for dev in dev_list:
pci_list = pci_list + [dev.name]
pci_dev_dict[dev.name] = dev
while True:
need_transform = False
for pci in pci_list:
if isinstance(pci, types.StringTypes):
dev = pci_dev_dict[pci]
if dev.bus == 0:
continue
if dev.dev_type == DEV_TYPE_PCIe_ENDPOINT and not dev.pcie_flr:
coassigned_pci_list = dev.find_all_the_multi_functions()
need_transform = True
elif dev.dev_type == DEV_TYPE_PCI and not dev.pci_af_flr:
coassigned_pci_list = dev.find_coassigned_pci_devices(True)
del coassigned_pci_list[0]
need_transform = True
if need_transform:
pci_list = transform_list(pci_list, coassigned_pci_list)
if not need_transform:
break
if len(pci_list) == 0:
return []
for i in range(0, len(pci_list)):
if isinstance(pci_list[i], types.StringTypes):
pci_list[i] = [pci_list[i]]
# Now every element in pci_list is a list of pci string.
result = []
for pci_names in pci_list:
devs = []
for pci in pci_names:
devs = devs + [pci_dev_dict[pci]]
result = result + [devs]
return result
def check_mmio_bar(devs_list):
result = []
for dev_list in devs_list:
non_aligned_bar_found = False
for dev in dev_list:
if dev.has_non_page_aligned_bar:
non_aligned_bar_found = True
break
if not non_aligned_bar_found:
result = result + [dev_list]
return result
class PciDeviceParseError(Exception):
def __init__(self,msg):
self.message = msg
def __str__(self):
return self.message
class PciDeviceAssignmentError(Exception):
def __init__(self,msg):
self.message = msg
def __str__(self):
return 'pci: improper device assignment specified: ' + \
self.message
class PciDeviceVslotMissing(Exception):
def __init__(self,msg):
self.message = msg
def __str__(self):
return 'pci: no vslot: ' + self.message
class PciDevice:
def __init__(self, dev):
self.domain = int(dev['domain'], 16)
self.bus = int(dev['bus'], 16)
self.slot = int(dev['slot'], 16)
self.func = int(dev['func'], 16)
self.name = pci_dict_to_bdf_str(dev)
self.cfg_space_path = find_sysfs_mnt()+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name + SYSFS_PCI_DEV_CONFIG_PATH
self.irq = 0
self.iomem = []
self.ioports = []
self.driver = None
self.vendor = None
self.device = None
self.subvendor = None
self.subdevice = None
self.msix = 0
self.msix_iomem = []
self.revision = 0
self.classcode = None
self.vendorname = ""
self.devicename = ""
self.classname = ""
self.subvendorname = ""
self.subdevicename = ""
self.dev_type = None
self.is_downstream_port = False
self.acs_enabled = False
self.has_non_page_aligned_bar = False
self.pcie_flr = False
self.pci_af_flr = False
self.detect_dev_info()
if (self.dev_type == DEV_TYPE_PCI_BRIDGE) or \
(self.dev_type == DEV_TYPE_PCIe_BRIDGE):
return
self.get_info_from_sysfs()
self.get_info_from_lspci()
def find_parent(self):
# i.e., /sys/bus/pci/devices/0000:00:19.0 or
# /sys/bus/pci/devices/0000:03:04.0
path = find_sysfs_mnt()+SYSFS_PCI_DEVS_PATH+'/'+ self.name
# i.e., ../../../devices/pci0000:00/0000:00:19.0
# ../../../devices/pci0000:00/0000:00:02.0/0000:01:00.2/0000:03:04.0
try:
target = os.readlink(path)
lst = target.split('/')
parent = lst[len(lst)-2]
if parent[0:3] == 'pci':
# We have reached the upmost one.
return None
return parse_pci_name(parent)
except OSError, (errno, strerr):
raise PciDeviceParseError('Can not locate the parent of %s' % self.name)
def find_the_uppermost_pci_bridge(self):
# Find the uppermost PCI/PCI-X bridge
dev = self.find_parent()
if dev is None:
return None
dev = dev_parent = PciDevice(dev)
while dev_parent.dev_type != DEV_TYPE_PCIe_BRIDGE:
parent = dev_parent.find_parent()
if parent is None:
break
dev = dev_parent
dev_parent = PciDevice(parent)
return dev
def find_all_devices_behind_the_bridge(self, ignore_bridge):
sysfs_mnt = find_sysfs_mnt()
self_path = sysfs_mnt + SYSFS_PCI_DEVS_PATH + '/' + self.name
pci_names = os.popen('ls ' + self_path).read()
dev_list = extract_the_exact_pci_names(pci_names)
list = [self.name]
for pci_str in dev_list:
dev = PciDevice(parse_pci_name(pci_str))
if dev.dev_type == DEV_TYPE_PCI_BRIDGE or \
dev.dev_type == DEV_TYPE_PCIe_BRIDGE:
sub_list_including_self = \
dev.find_all_devices_behind_the_bridge(ignore_bridge)
if ignore_bridge:
del sub_list_including_self[0]
list = list + [sub_list_including_self]
else:
list = list + [dev.name]
return list
def find_coassigned_pci_devices(self, ignore_bridge = True):
        ''' Here 'self' is a PCI device; we need to find the uppermost PCI/PCI-X
bridge, and all devices behind it must be co-assigned to the same
guest.
Parameter:
[ignore_bridge]: if set, the returned result doesn't include
any bridge behind the uppermost PCI/PCI-X bridge.
Note: The first element of the return value is the uppermost
PCI/PCI-X bridge. If the caller doesn't need the first
element, the caller itself can remove it explicitly.
'''
dev = self.find_the_uppermost_pci_bridge()
# The 'self' device is on bus0.
if dev is None:
return [self.name]
dev_list = dev.find_all_devices_behind_the_bridge(ignore_bridge)
dev_list = extract_the_exact_pci_names(dev_list)
return dev_list
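    # Illustrative usage (an added sketch, not part of the original module):
    # for a conventional PCI function such as 0000:03:04.0 behind a PCI/PCI-X
    # bridge, a hypothetical caller could collect the co-assignment group with
    #   dev = PciDevice(parse_pci_name('0000:03:04.0'))
    #   group = dev.find_coassigned_pci_devices(ignore_bridge = True)
    # and would get back a flat list of BDF strings that must stay together,
    # headed by the uppermost bridge as the docstring above notes.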
def do_secondary_bus_reset(self, target_bus, devs):
# Save the config spaces of all the devices behind the bus.
(pci_list, cfg_list) = save_pci_conf_space(devs)
#Do the Secondary Bus Reset
sysfs_mnt = find_sysfs_mnt()
parent_path = sysfs_mnt + SYSFS_PCI_DEVS_PATH + '/' + \
target_bus + SYSFS_PCI_DEV_CONFIG_PATH
fd = os.open(parent_path, os.O_RDWR)
os.lseek(fd, PCI_CB_BRIDGE_CONTROL, 0)
br_cntl = (struct.unpack('H', os.read(fd, 2)))[0]
# Assert Secondary Bus Reset
os.lseek(fd, PCI_CB_BRIDGE_CONTROL, 0)
br_cntl |= PCI_BRIDGE_CTL_BUS_RESET
os.write(fd, struct.pack('H', br_cntl))
time.sleep(0.100)
# De-assert Secondary Bus Reset
os.lseek(fd, PCI_CB_BRIDGE_CONTROL, 0)
br_cntl &= ~PCI_BRIDGE_CTL_BUS_RESET
os.write(fd, struct.pack('H', br_cntl))
time.sleep(0.100)
os.close(fd)
# Restore the config spaces
restore_pci_conf_space((pci_list, cfg_list))
def do_Dstate_transition(self):
pos = self.find_cap_offset(PCI_CAP_ID_PM)
if pos == 0:
return False
# No_Soft_Reset - When set 1, this bit indicates that
# devices transitioning from D3hot to D0 because of
# PowerState commands do not perform an internal reset.
pm_ctl = self.pci_conf_read32(pos + PCI_PM_CTRL)
if (pm_ctl & PCI_PM_CTRL_NO_SOFT_RESET) == PCI_PM_CTRL_NO_SOFT_RESET:
return False
(pci_list, cfg_list) = save_pci_conf_space([self.name])
# Enter D3hot
pm_ctl &= ~PCI_PM_CTRL_STATE_MASK
pm_ctl |= PCI_D3hot
self.pci_conf_write32(pos + PCI_PM_CTRL, pm_ctl)
time.sleep(0.010)
# From D3hot to D0
pm_ctl &= ~PCI_PM_CTRL_STATE_MASK
pm_ctl |= PCI_D0hot
self.pci_conf_write32(pos + PCI_PM_CTRL, pm_ctl)
time.sleep(0.010)
restore_pci_conf_space((pci_list, cfg_list))
return True
def do_vendor_specific_FLR_method(self):
pos = self.find_cap_offset(PCI_CAP_ID_VENDOR_SPECIFIC_CAP)
if pos == 0:
return
vendor_id = self.pci_conf_read16(PCI_VENDOR_ID)
if vendor_id != VENDOR_INTEL:
return
class_id = self.pci_conf_read16(PCI_CLASS_DEVICE)
if class_id != PCI_CLASS_ID_USB:
return
(pci_list, cfg_list) = save_pci_conf_space([self.name])
self.pci_conf_write8(pos + PCI_USB_FLRCTRL, 1)
time.sleep(0.100)
restore_pci_conf_space((pci_list, cfg_list))
def do_FLR_for_integrated_device(self):
if not self.do_Dstate_transition():
self.do_vendor_specific_FLR_method()
def do_AF_FLR(self, af_pos):
''' use PCI Advanced Capability to do FLR
'''
(pci_list, cfg_list) = save_pci_conf_space([self.name])
self.pci_conf_write8(af_pos + PCI_AF_CTL, PCI_AF_CTL_FLR)
time.sleep(0.100)
restore_pci_conf_space((pci_list, cfg_list))
def do_FLR_for_intel_4Series_iGFX(self):
af_pos = PCI_CAP_IGFX_CAP13_OFFSET
self.do_AF_FLR(af_pos)
log.debug("Intel 4 Series iGFX FLR done")
def do_FLR_for_GM45_iGFX(self):
reg32 = self.pci_conf_read32(PCI_CAP_IGFX_CAP09_OFFSET)
if ((reg32 >> 16) & 0x000000FF) != 0x06 or \
((reg32 >> 24) & 0x000000F0) != 0x20:
return
self.pci_conf_write8(PCI_CAP_IGFX_GDRST_OFFSET, PCI_CAP_IGFX_GDRST)
for i in range(0, 10):
time.sleep(0.100)
reg8 = self.pci_conf_read8(PCI_CAP_IGFX_GDRST_OFFSET)
if (reg8 & 0x01) == 0:
break
        if (reg8 & 0x01) != 0:
log.debug("Intel iGFX FLR fail on GM45")
return
# This specific reset will hang if the command register does not have
# memory space access enabled
cmd = self.pci_conf_read16(PCI_COMMAND)
self.pci_conf_write16(PCI_COMMAND, (cmd | 0x02))
af_pos = PCI_CAP_IGFX_CAP09_OFFSET
self.do_AF_FLR(af_pos)
self.pci_conf_write16(PCI_COMMAND, cmd)
log.debug("Intel iGFX FLR on GM45 done")
def find_all_the_multi_functions(self):
sysfs_mnt = find_sysfs_mnt()
parentdict = self.find_parent()
if parentdict is None :
return [ self.name ]
parent = pci_dict_to_bdf_str(parentdict)
pci_names = os.popen('ls ' + sysfs_mnt + SYSFS_PCI_DEVS_PATH + '/' + \
parent + '/').read()
funcs = extract_the_exact_pci_names(pci_names)
return funcs
def find_coassigned_devices(self):
if self.dev_type == DEV_TYPE_PCIe_ENDPOINT and not self.pcie_flr:
return self.find_all_the_multi_functions()
elif self.dev_type == DEV_TYPE_PCI and not self.pci_af_flr:
coassigned_pci_list = self.find_coassigned_pci_devices(True)
if len(coassigned_pci_list) > 1:
del coassigned_pci_list[0]
return coassigned_pci_list
else:
return [self.name]
def find_cap_offset(self, cap):
path = find_sysfs_mnt()+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_CONFIG_PATH
pos = PCI_CAPABILITY_LIST
try:
fd = None
fd = os.open(path, os.O_RDONLY)
os.lseek(fd, PCI_STATUS, 0)
status = struct.unpack('H', os.read(fd, 2))[0]
if (status & 0x10) == 0:
os.close(fd)
# The device doesn't support PCI_STATUS_CAP_LIST
return 0
max_cap = 48
while max_cap > 0:
os.lseek(fd, pos, 0)
pos = ord(os.read(fd, 1))
if pos < 0x40:
pos = 0
break;
os.lseek(fd, pos + 0, 0)
id = ord(os.read(fd, 1))
if id == 0xff:
pos = 0
break;
# Found the capability
if id == cap:
break;
# Test the next one
pos = pos + 1
max_cap = max_cap - 1;
os.close(fd)
except OSError, (errno, strerr):
if fd is not None:
os.close(fd)
raise PciDeviceParseError(('Error when accessing sysfs: %s (%d)' %
(strerr, errno)))
return pos
def find_ext_cap(self, cap):
path = find_sysfs_mnt()+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_CONFIG_PATH
ttl = 480; # 3840 bytes, minimum 8 bytes per capability
pos = 0x100
try:
fd = os.open(path, os.O_RDONLY)
os.lseek(fd, pos, 0)
h = os.read(fd, 4)
if len(h) == 0: # MMCONF is not enabled?
return 0
header = struct.unpack('I', h)[0]
if header == 0 or header == -1:
return 0
while ttl > 0:
if (header & 0x0000ffff) == cap:
return pos
pos = (header >> 20) & 0xffc
if pos < 0x100:
break
os.lseek(fd, pos, 0)
header = struct.unpack('I', os.read(fd, 4))[0]
ttl = ttl - 1
os.close(fd)
except OSError, (errno, strerr):
raise PciDeviceParseError(('Error when accessing sysfs: %s (%d)' %
(strerr, errno)))
return 0
def is_behind_switch_lacking_acs(self):
# If there is intermediate PCIe switch, which doesn't support ACS or
# doesn't enable ACS, between Root Complex and the function, we return
# True, meaning the function is not allowed to be assigned to guest due
# to potential security issue.
parent = self.find_parent()
while parent is not None:
dev_parent = PciDevice(parent)
if dev_parent.is_downstream_port and not dev_parent.acs_enabled:
return True
parent = dev_parent.find_parent()
return False
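    # Illustrative guard (an added sketch, not part of the original module): a
    # hypothetical assignment path could refuse such a function early, e.g.
    #   if dev.is_behind_switch_lacking_acs():
    #       raise PciDeviceAssignmentError(dev.name + ' is behind a switch without ACS')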
def pci_conf_read8(self, pos):
fd = os.open(self.cfg_space_path, os.O_RDONLY)
os.lseek(fd, pos, 0)
str = os.read(fd, 1)
os.close(fd)
val = struct.unpack('B', str)[0]
return val
def pci_conf_read16(self, pos):
fd = os.open(self.cfg_space_path, os.O_RDONLY)
os.lseek(fd, pos, 0)
str = os.read(fd, 2)
os.close(fd)
val = struct.unpack('H', str)[0]
return val
def pci_conf_read32(self, pos):
fd = os.open(self.cfg_space_path, os.O_RDONLY)
os.lseek(fd, pos, 0)
str = os.read(fd, 4)
os.close(fd)
val = struct.unpack('I', str)[0]
return val
def pci_conf_write8(self, pos, val):
str = struct.pack('B', val)
fd = os.open(self.cfg_space_path, os.O_WRONLY)
os.lseek(fd, pos, 0)
os.write(fd, str)
os.close(fd)
def pci_conf_write16(self, pos, val):
str = struct.pack('H', val)
fd = os.open(self.cfg_space_path, os.O_WRONLY)
os.lseek(fd, pos, 0)
os.write(fd, str)
os.close(fd)
def pci_conf_write32(self, pos, val):
str = struct.pack('I', val)
fd = os.open(self.cfg_space_path, os.O_WRONLY)
os.lseek(fd, pos, 0)
os.write(fd, str)
os.close(fd)
def detect_dev_info(self):
try:
class_dev = self.pci_conf_read16(PCI_CLASS_DEVICE)
except OSError, (err, strerr):
if err == errno.ENOENT:
strerr = "the device doesn't exist?"
raise PciDeviceParseError('%s: %s' %\
(self.name, strerr))
pos = self.find_cap_offset(PCI_CAP_ID_EXP)
if class_dev == PCI_CLASS_BRIDGE_PCI:
if pos == 0:
self.dev_type = DEV_TYPE_PCI_BRIDGE
else:
creg = self.pci_conf_read16(pos + PCI_EXP_FLAGS)
type = (creg & PCI_EXP_FLAGS_TYPE) >> 4
if type == PCI_EXP_TYPE_PCI_BRIDGE:
self.dev_type = DEV_TYPE_PCI_BRIDGE
else:
self.dev_type = DEV_TYPE_PCIe_BRIDGE
if type == PCI_EXP_TYPE_DOWNSTREAM:
self.is_downstream_port = True
pos = self.find_ext_cap(PCI_EXT_CAP_ID_ACS)
if pos != 0:
ctrl = self.pci_conf_read16(pos + PCI_EXT_ACS_CTRL)
if (ctrl & PCI_EXT_CAP_ACS_ENABLED) == \
(PCI_EXT_CAP_ACS_ENABLED):
self.acs_enabled = True
else:
if pos != 0:
self.dev_type = DEV_TYPE_PCIe_ENDPOINT
else:
self.dev_type = DEV_TYPE_PCI
# Force 0000:00:00.0 to be DEV_TYPE_PCIe_BRIDGE
if self.name == '0000:00:00.0':
self.dev_type = DEV_TYPE_PCIe_BRIDGE
if (self.dev_type == DEV_TYPE_PCI_BRIDGE) or \
(self.dev_type == DEV_TYPE_PCIe_BRIDGE):
return
        # Try to find the PCIe FLR capability
if self.dev_type == DEV_TYPE_PCIe_ENDPOINT:
dev_cap = self.pci_conf_read32(pos + PCI_EXP_DEVCAP)
if dev_cap & PCI_EXP_DEVCAP_FLR:
self.pcie_flr = True
else:
# Quirk for the VF of Intel 82599 10GbE Controller.
# We know it does have PCIe FLR capability even if it doesn't
# report that (dev_cap.PCI_EXP_DEVCAP_FLR is 0).
# See the 82599 datasheet.
dev_path = find_sysfs_mnt()+SYSFS_PCI_DEVS_PATH+'/'+self.name
vendor_id = parse_hex(os.popen('cat %s/vendor' % dev_path).read())
device_id = parse_hex(os.popen('cat %s/device' % dev_path).read())
if (vendor_id == VENDOR_INTEL) and \
(device_id == DEVICE_ID_82599):
self.pcie_flr = True
elif self.dev_type == DEV_TYPE_PCI:
# Try to find the "PCI Advanced Capabilities"
pos = self.find_cap_offset(PCI_CAP_ID_AF)
if pos != 0:
af_cap = self.pci_conf_read8(pos + PCI_AF_CAPs)
if (af_cap & PCI_AF_CAPs_TP_FLR) == PCI_AF_CAPs_TP_FLR:
self.pci_af_flr = True
bar_addr = PCI_BAR_0
while bar_addr <= PCI_BAR_5:
bar = self.pci_conf_read32(bar_addr)
if (bar & PCI_BAR_SPACE) == PCI_BAR_MEM:
bar = bar & PCI_BAR_MEM_MASK
bar = bar & ~PAGE_MASK
if bar != 0:
self.has_non_page_aligned_bar = True
break
bar_addr = bar_addr + 4
def devs_check_driver(self, devs):
if len(devs) == 0:
return
for pci_dev in devs:
dev = PciDevice(parse_pci_name(pci_dev))
if dev.driver == 'pciback' or dev.driver == 'pci-stub':
continue
err_msg = 'pci: %s must be co-assigned to the same guest with %s' + \
', but it is not owned by pciback or pci-stub.'
raise PciDeviceAssignmentError(err_msg % (pci_dev, self.name))
def do_FLR(self, is_hvm, strict_check):
""" Perform FLR (Functional Level Reset) for the device.
"""
if self.dev_type == DEV_TYPE_PCIe_ENDPOINT:
# If PCIe device supports FLR, we use it.
if self.pcie_flr:
(pci_list, cfg_list) = save_pci_conf_space([self.name])
pos = self.find_cap_offset(PCI_CAP_ID_EXP)
self.pci_conf_write32(pos + PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_FLR)
# We must sleep at least 100ms for the completion of FLR
time.sleep(0.100)
restore_pci_conf_space((pci_list, cfg_list))
else:
if self.bus == 0:
self.do_FLR_for_integrated_device()
else:
funcs = self.find_all_the_multi_functions()
if not is_hvm and (len(funcs) > 1):
return
if is_hvm and not strict_check:
return
self.devs_check_driver(funcs)
parent = pci_dict_to_bdf_str(self.find_parent())
# Do Secondary Bus Reset.
self.do_secondary_bus_reset(parent, funcs)
# PCI devices
else:
# For PCI device on host bus, we test "PCI Advanced Capabilities".
if self.bus == 0 and self.pci_af_flr:
af_pos = self.find_cap_offset(PCI_CAP_ID_AF)
self.do_AF_FLR(af_pos)
else:
if self.bus == 0:
if self.slot == 0x02 and self.func == 0x0:
vendor_id = self.pci_conf_read16(PCI_VENDOR_ID)
if vendor_id != VENDOR_INTEL:
return
class_id = self.pci_conf_read16(PCI_CLASS_DEVICE)
if class_id != PCI_CLASS_ID_VGA:
return
device_id = self.pci_conf_read16(PCI_DEVICE_ID)
if device_id == PCI_DEVICE_ID_IGFX_GM45:
self.do_FLR_for_GM45_iGFX()
elif device_id == PCI_DEVICE_ID_IGFX_EAGLELAKE or \
device_id == PCI_DEVICE_ID_IGFX_Q45 or \
device_id == PCI_DEVICE_ID_IGFX_G45 or \
device_id == PCI_DEVICE_ID_IGFX_G41:
self.do_FLR_for_intel_4Series_iGFX()
else:
log.debug("Unknown iGFX device_id:%x", device_id)
else:
self.do_FLR_for_integrated_device()
else:
devs = self.find_coassigned_pci_devices(False)
# Remove the element 0 which is a bridge
target_bus = devs[0]
del devs[0]
if not is_hvm and (len(devs) > 1):
return
if is_hvm and not strict_check:
return
self.devs_check_driver(devs)
# Do Secondary Bus Reset.
self.do_secondary_bus_reset(target_bus, devs)
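    # Illustrative call sequence (an added sketch, not part of the original
    # module): a management layer that has already bound the function to
    # pciback might reset it with
    #   dev = PciDevice(parse_pci_name('0000:03:04.0'))
    #   dev.do_FLR(is_hvm=True, strict_check=True)
    # which walks the branches above: PCIe FLR when advertised, otherwise a
    # D-state transition for integrated devices or a secondary bus reset.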
def find_capability(self, type):
sysfs_mnt = find_sysfs_mnt()
if sysfs_mnt == None:
return False
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_CONFIG_PATH
try:
conf_file = open(path, 'rb')
conf_file.seek(PCI_HEADER_TYPE)
header_type = ord(conf_file.read(1)) & PCI_HEADER_TYPE_MASK
if header_type == PCI_HEADER_TYPE_CARDBUS:
return
conf_file.seek(PCI_STATUS_OFFSET)
status = ord(conf_file.read(1))
if status&PCI_STATUS_CAP_MASK:
conf_file.seek(PCI_CAP_OFFSET)
capa_pointer = ord(conf_file.read(1))
capa_count = 0
while capa_pointer:
if capa_pointer < 0x40:
raise PciDeviceParseError(
('Broken capability chain: %s' % self.name))
capa_count += 1
if capa_count > 96:
raise PciDeviceParseError(
('Looped capability chain: %s' % self.name))
conf_file.seek(capa_pointer)
capa_id = ord(conf_file.read(1))
capa_pointer = ord(conf_file.read(1))
if capa_id == type:
# get the type
message_cont_lo = ord(conf_file.read(1))
message_cont_hi = ord(conf_file.read(1))
self.msix=1
self.msix_entries = (message_cont_lo + \
(message_cont_hi << 8)) \
& MSIX_SIZE_MASK
t_off=conf_file.read(4)
p_off=conf_file.read(4)
self.table_offset=ord(t_off[0]) | (ord(t_off[1])<<8) | \
(ord(t_off[2])<<16)| \
(ord(t_off[3])<<24)
self.pba_offset=ord(p_off[0]) | (ord(p_off[1]) << 8)| \
(ord(p_off[2])<<16) | \
(ord(p_off[3])<<24)
self.table_index = self.table_offset & MSIX_BIR_MASK
self.table_offset = self.table_offset & ~MSIX_BIR_MASK
self.pba_index = self.pba_offset & MSIX_BIR_MASK
self.pba_offset = self.pba_offset & ~MSIX_BIR_MASK
break
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to locate sysfs mount: %s: %s (%d)' %
(PROC_PCI_PATH, strerr, errno)))
except TypeError, err:
log.debug("Caught TypeError '%s'" % err)
pass
def get_info_from_sysfs(self):
self.find_capability(0x11)
sysfs_mnt = find_sysfs_mnt()
if sysfs_mnt == None:
return False
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_RESOURCE_PATH
try:
resource_file = open(path,'r')
for i in range(PROC_PCI_NUM_RESOURCES):
line = resource_file.readline()
sline = line.split()
if len(sline)<3:
continue
start = int(sline[0],16)
end = int(sline[1],16)
flags = int(sline[2],16)
size = end-start+1
if start!=0:
if flags&PCI_BAR_IO:
self.ioports.append( (start,size) )
else:
self.iomem.append( (start,size) )
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_IRQ_PATH
try:
self.irq = int(open(path,'r').readline())
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_DRIVER_DIR_PATH
try:
self.driver = os.path.basename(os.readlink(path))
except OSError, (errno, strerr):
self.driver = ""
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_VENDOR_PATH
try:
self.vendor = int(open(path,'r').readline(), 16)
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_DEVICE_PATH
try:
self.device = int(open(path,'r').readline(), 16)
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_SUBVENDOR_PATH
try:
self.subvendor = int(open(path,'r').readline(), 16)
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_SUBDEVICE_PATH
try:
self.subdevice = int(open(path,'r').readline(), 16)
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_CLASS_PATH
try:
self.classcode = int(open(path,'r').readline(), 16)
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
return True
def get_info_from_lspci(self):
""" Get information such as vendor name, device name, class name, etc.
Since we cannot obtain these data from sysfs, use 'lspci' command.
"""
global lspci_info
global lspci_info_lock
lspci_info_lock.acquire()
try:
if lspci_info is None:
_create_lspci_info()
device_info = lspci_info.get(self.name)
if device_info:
try:
self.revision = int(device_info.get('Rev', '0'), 16)
except ValueError:
pass
self.vendorname = device_info.get('Vendor', '')
self.devicename = device_info.get('Device', '')
self.classname = device_info.get('Class', '')
self.subvendorname = device_info.get('SVendor', '')
self.subdevicename = device_info.get('SDevice', '')
return True
finally:
lspci_info_lock.release()
def __str__(self):
str = "PCI Device %s\n" % (self.name)
for (start,size) in self.ioports:
str = str + "IO Port 0x%02x [size=%d]\n"%(start,size)
for (start,size) in self.iomem:
str = str + "IO Mem 0x%02x [size=%d]\n"%(start,size)
str = str + "IRQ %d\n"%(self.irq)
str = str + "Vendor ID 0x%04x\n"%(self.vendor)
str = str + "Device ID 0x%04x\n"%(self.device)
str = str + "Sybsystem Vendor ID 0x%04x\n"%(self.subvendor)
str = str + "Subsystem Device ID 0x%04x"%(self.subdevice)
return str
def main():
if len(sys.argv)<5:
print "Usage: %s <domain> <bus> <slot> <func>\n" % sys.argv[0]
sys.exit(2)
    # PciDevice expects a dict of hex strings, as produced by parse_pci_name()
    dev = PciDevice({'domain': sys.argv[1], 'bus': sys.argv[2],
            'slot': sys.argv[3], 'func': sys.argv[4]})
print str(dev)
if __name__=='__main__':
main()
| gpl-2.0 |
pnecchi/Thesis | Code/Prototype/critic.py | 1 | 2591 | ################################################################################
# Description: Module containing various critic implementations
# Author: Pierpaolo Necchi
# Email: [email protected]
# Date: dom 05 giu 2016 18:24:01 CEST
################################################################################
import numpy as np
class Critic(object):
""" Critic class which specifies the generic interface of a critic. """
def __init__(self, dimIn):
""" Initialize critic.
Args:
dimIn (int): state size
"""
# Initialize input size, i.e. size of the state
self.dimIn = dimIn
def __call__(self, state):
""" Evaluate a given state.
Args:
state (np.array): state to be evaluated
Returns:
value (float): state value
"""
pass
class LinearCritic(Critic):
""" Critic that uses a linear function approximation """
def __init__(self, dimIn, features):
""" Initialize LinearCritic.
Args:
dimIn (int): state size
features (object): features Phi(s)
"""
# Initialize Critic base class
Critic.__init__(self, dimIn)
# Initialize features
self._features = features
self._dimPar = features.size()
# Initialize critic parameters
        self._parameters = 0.05 * np.random.randn(self._dimPar)
def __call__(self, state):
""" Evaluate a given state.
Args:
state (np.array): state to be evaluated
Returns:
value (float): state value
"""
# Cache state
self._lastState = state
# Evaluate features and cache result
self._featuresEval = self._features(state)
# Evaluate state
return np.dot(self._featuresEval, self._parameters.T)
def gradient(self, state):
""" Compute critic gradient.
Args:
state (np.array): state
Returns:
gradient (np.array): critic gradient
"""
        if not np.array_equal(state, self._lastState):
            self._featuresEval = self._features(state)
return self._featuresEval
def getParameters(self):
""" Return critic parameters.
Returns:
            parameters (np.array): critic parameters
"""
return self._parameters
def setParameters(self, parameters):
""" Set critic parameters.
Args:
            parameters (np.array): new critic parameters
"""
self._parameters = parameters
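if __name__ == '__main__':
    # Illustrative smoke test (an added sketch, not part of the original
    # module). The features object below is a stand-in matching the interface
    # the classes above assume: size() and __call__(state) -> np.array.
    class _QuadraticFeatures(object):
        def size(self):
            return 3
        def __call__(self, state):
            return np.array([1.0, state[0], state[0] ** 2])
    critic = LinearCritic(dimIn=1, features=_QuadraticFeatures())
    state = np.array([0.5])
    print(critic(state))
    print(critic.gradient(state))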
| mit |
ghisvail/vispy | vispy/visuals/transforms/tests/test_transforms.py | 17 | 6541 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
import vispy.visuals.transforms as tr
from vispy.geometry import Rect
from vispy.testing import run_tests_if_main
NT = tr.NullTransform
ST = tr.STTransform
AT = tr.MatrixTransform
RT = tr.MatrixTransform
PT = tr.PolarTransform
LT = tr.LogTransform
CT = tr.ChainTransform
def assert_chain_types(chain, types):
assert list(map(type, chain.transforms)) == types
def assert_chain_objects(chain1, chain2):
assert chain1.transforms == chain2.transforms
def tesst_multiplication():
n = NT()
s = ST()
a = AT()
p = PT()
l = LT()
c1 = CT([s, a, p])
assert c1
c2 = CT([s, a, s])
assert isinstance(n * n, NT)
assert isinstance(n * s, ST)
assert isinstance(s * s, ST)
assert isinstance(a * s, AT)
assert isinstance(a * a, AT)
assert isinstance(s * a, AT)
assert isinstance(n * p, PT)
assert isinstance(s * p, CT)
assert_chain_types(s * p, [PT, ST])
assert_chain_types(s * p * a, [ST, PT, AT])
assert_chain_types(s * a * p, [PT, AT])
assert_chain_types(s * p * s, [ST, PT, ST])
assert_chain_types(s * a * p * s * a, [AT, PT, AT])
assert_chain_types(c2 * a, [AT])
assert_chain_types(p * l * s, [ST, LT, PT])
def test_transform_chain():
# Make dummy classes for easier distinguishing the transforms
class DummyTrans(tr.BaseTransform):
glsl_map = "vec4 trans(vec4 pos) {return pos;}"
glsl_imap = "vec4 trans(vec4 pos) {return pos;}"
class TransA(DummyTrans):
pass
class TransB(DummyTrans):
pass
class TransC(DummyTrans):
pass
# Create test transforms
a, b, c = TransA(), TransB(), TransC()
# Test Chain creation
assert tr.ChainTransform().transforms == []
assert tr.ChainTransform(a).transforms == [a]
assert tr.ChainTransform(a, b).transforms == [a, b]
assert tr.ChainTransform(a, b, c, a).transforms == [a, b, c, a]
# Test composition by multiplication
assert_chain_objects(a * b, tr.ChainTransform(a, b))
assert_chain_objects(a * b * c, tr.ChainTransform(a, b, c))
assert_chain_objects(a * b * c * a, tr.ChainTransform(a, b, c, a))
# Test adding/prepending to transform
chain = tr.ChainTransform()
chain.append(a)
assert chain.transforms == [a]
chain.append(b)
assert chain.transforms == [a, b]
chain.append(c)
assert chain.transforms == [a, b, c]
chain.prepend(b)
assert chain.transforms == [b, a, b, c]
chain.prepend(c)
assert chain.transforms == [c, b, a, b, c]
# Test simplifying
t1 = tr.STTransform(scale=(2, 3))
t2 = tr.STTransform(translate=(3, 4))
t3 = tr.STTransform(translate=(3, 4))
# Create multiplied versions
t123 = t1*t2*t3
t321 = t3*t2*t1
c123 = tr.ChainTransform(t1, t2, t3)
c321 = tr.ChainTransform(t3, t2, t1)
c123s = c123.simplified
c321s = c321.simplified
#
assert isinstance(t123, tr.STTransform) # or the test is useless
assert isinstance(t321, tr.STTransform) # or the test is useless
assert isinstance(c123s, tr.ChainTransform) # or the test is useless
assert isinstance(c321s, tr.ChainTransform) # or the test is useless
# Test Mapping
t1 = tr.STTransform(scale=(2, 3))
t2 = tr.STTransform(translate=(3, 4))
chain1 = tr.ChainTransform(t1, t2)
chain2 = tr.ChainTransform(t2, t1)
#
assert chain1.transforms == [t1, t2] # or the test is useless
assert chain2.transforms == [t2, t1] # or the test is useless
#
m12 = (t1*t2).map((1, 1)).tolist()
m21 = (t2*t1).map((1, 1)).tolist()
m12_ = chain1.map((1, 1)).tolist()
m21_ = chain2.map((1, 1)).tolist()
#
#print(m12, m21, m12_, m21_)
assert m12 != m21
assert m12 == m12_
assert m21 == m21_
# Test shader map
t1 = tr.STTransform(scale=(2, 3))
t2 = tr.STTransform(translate=(3, 4))
chain = tr.ChainTransform(t1, t2)
#
funcs = chain.shader_map().dependencies()
funcsi = chain.shader_imap().dependencies()
#
assert t1.shader_map() in funcs
assert t2.shader_map() in funcs
assert t1.shader_imap() in funcsi
assert t2.shader_imap() in funcsi
def test_map_rect():
r = Rect((2, 7), (13, 19))
r1 = ST(scale=(2, 2), translate=(-10, 10)).map(r)
assert r1 == Rect((-6, 24), (26, 38))
def test_st_transform():
# Check that STTransform maps exactly like MatrixTransform
pts = np.random.normal(size=(10, 4))
scale = (1, 7.5, -4e-8)
translate = (1e6, 0.2, 0)
st = tr.STTransform(scale=scale, translate=translate)
at = tr.MatrixTransform()
at.scale(scale)
at.translate(translate)
assert np.allclose(st.map(pts), at.map(pts))
assert np.allclose(st.inverse.map(pts), at.inverse.map(pts))
def test_st_mapping():
p1 = [[5., 7.], [23., 8.]]
p2 = [[-1.3, -1.4], [1.1, 1.2]]
t = tr.STTransform()
t.set_mapping(p1, p2)
assert np.allclose(t.map(p1)[:, :len(p2)], p2)
def test_affine_mapping():
t = tr.MatrixTransform()
p1 = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
# test pure translation
p2 = p1 + 5.5
t.set_mapping(p1, p2)
assert np.allclose(t.map(p1)[:, :p2.shape[1]], p2)
# test pure scaling
p2 = p1 * 5.5
t.set_mapping(p1, p2)
assert np.allclose(t.map(p1)[:, :p2.shape[1]], p2)
# test scale + translate
p2 = (p1 * 5.5) + 3.5
t.set_mapping(p1, p2)
assert np.allclose(t.map(p1)[:, :p2.shape[1]], p2)
# test SRT
p2 = np.array([[10, 5, 3],
[10, 15, 3],
[30, 5, 3],
[10, 5, 3.5]])
t.set_mapping(p1, p2)
assert np.allclose(t.map(p1)[:, :p2.shape[1]], p2)
def test_inverse():
m = np.random.normal(size=(4, 4))
transforms = [
NT(),
ST(scale=(1e-4, 2e5), translate=(10, -6e9)),
AT(m),
RT(m),
]
np.random.seed(0)
N = 20
x = np.random.normal(size=(N, 3))
pw = np.random.normal(size=(N, 3), scale=3)
pos = x * 10 ** pw
for trn in transforms:
assert np.allclose(pos, trn.inverse.map(trn.map(pos))[:, :3])
# log transform only works on positive values
#abs_pos = np.abs(pos)
#tr = LT(base=(2, 4.5, 0))
#assert np.allclose(abs_pos, tr.inverse.map(tr.map(abs_pos))[:,:3])
run_tests_if_main()
| bsd-3-clause |
insomnia-lab/calibre | src/calibre/library/server/content.py | 3 | 10759 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import re, os, posixpath
import cherrypy
from calibre import fit_image, guess_type
from calibre.utils.date import fromtimestamp, as_utc
from calibre.library.caches import SortKeyGenerator
from calibre.library.save_to_disk import find_plugboard
from calibre.ebooks.metadata import authors_to_string
from calibre.utils.magick.draw import (save_cover_data_to, Image,
thumbnail as generate_thumbnail)
from calibre.utils.filenames import ascii_filename
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.utils.config import tweaks
plugboard_content_server_value = 'content_server'
plugboard_content_server_formats = ['epub', 'mobi', 'azw3']
class CSSortKeyGenerator(SortKeyGenerator):
def __init__(self, fields, fm, db_prefs):
SortKeyGenerator.__init__(self, fields, fm, None, db_prefs)
def __call__(self, record):
return self.itervals(record).next()
class ContentServer(object):
'''
Handles actually serving content files/covers/metadata. Also has
a few utility methods.
'''
def add_routes(self, connect):
connect('root', '/', self.index)
connect('old', '/old', self.old)
connect('get', '/get/{what}/{id}', self.get,
conditions=dict(method=["GET", "HEAD"]),
android_workaround=True)
connect('static', '/static/{name:.*?}', self.static,
conditions=dict(method=["GET", "HEAD"]))
connect('favicon', '/favicon.png', self.favicon,
conditions=dict(method=["GET", "HEAD"]))
# Utility methods {{{
def last_modified(self, updated):
'''
Generates a locale independent, english timestamp from a datetime
object
'''
updated = as_utc(updated)
lm = updated.strftime('day, %d month %Y %H:%M:%S GMT')
day ={0:'Sun', 1:'Mon', 2:'Tue', 3:'Wed', 4:'Thu', 5:'Fri', 6:'Sat'}
lm = lm.replace('day', day[int(updated.strftime('%w'))])
month = {1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May', 6:'Jun', 7:'Jul',
8:'Aug', 9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'}
return lm.replace('month', month[updated.month])
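    # Illustrative output (an added note, not from the original source): for a
    # UTC datetime of 2014-02-03 04:05:06 this yields
    # 'Mon, 03 Feb 2014 04:05:06 GMT', i.e. an RFC 1123 style value suitable
    # for a Last-Modified header regardless of the server locale.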
def sort(self, items, field, order):
field = self.db.data.sanitize_sort_field_name(field)
if field not in self.db.field_metadata.sortable_field_keys():
raise cherrypy.HTTPError(400, '%s is not a valid sort field'%field)
keyg = CSSortKeyGenerator([(field, order)], self.db.field_metadata,
self.db.prefs)
items.sort(key=keyg, reverse=not order)
# }}}
def get(self, what, id):
'Serves files, covers, thumbnails, metadata from the calibre database'
try:
id = int(id)
except ValueError:
id = id.rpartition('.')[0].rpartition('_')[-1]
match = re.search(r'\d+', id)
if not match:
raise cherrypy.HTTPError(404, 'id:%s not an integer'%id)
id = int(match.group())
if not self.db.has_id(id):
raise cherrypy.HTTPError(404, 'id:%d does not exist in database'%id)
if what == 'thumb' or what.startswith('thumb_'):
try:
width, height = map(int, what.split('_')[1:])
except:
width, height = 60, 80
return self.get_cover(id, thumbnail=True, thumb_width=width,
thumb_height=height)
if what == 'cover':
return self.get_cover(id)
if what == 'opf':
return self.get_metadata_as_opf(id)
if what == 'json':
raise cherrypy.InternalRedirect('/ajax/book/%d'%id)
return self.get_format(id, what)
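    # Illustrative request mapping (an added note, not from the original
    # source): with the routes registered above, GET /get/thumb_60_80/123
    # returns a 60x80 JPEG thumbnail for book id 123, /get/cover/123 the full
    # cover, /get/opf/123 the OPF metadata and /get/epub/123 the EPUB itself.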
def static(self, name):
'Serves static content'
name = name.lower()
fname = posixpath.basename(name)
try:
cherrypy.response.headers['Content-Type'] = {
'js' : 'text/javascript',
'css' : 'text/css',
'png' : 'image/png',
'gif' : 'image/gif',
'html' : 'text/html',
}[fname.rpartition('.')[-1].lower()]
except KeyError:
raise cherrypy.HTTPError(404, '%r not a valid resource type'%name)
cherrypy.response.headers['Last-Modified'] = self.last_modified(self.build_time)
basedir = os.path.abspath(P('content_server'))
path = os.path.join(basedir, name.replace('/', os.sep))
path = os.path.abspath(path)
if not path.startswith(basedir):
raise cherrypy.HTTPError(403, 'Access to %s is forbidden'%name)
if not os.path.exists(path) or not os.path.isfile(path):
raise cherrypy.HTTPError(404, '%s not found'%name)
if self.opts.develop:
lm = fromtimestamp(os.stat(path).st_mtime)
cherrypy.response.headers['Last-Modified'] = self.last_modified(lm)
with open(path, 'rb') as f:
ans = f.read()
if path.endswith('.css'):
ans = ans.replace('/static/', self.opts.url_prefix + '/static/')
return ans
def favicon(self):
data = I('lt.png', data=True)
cherrypy.response.headers['Content-Type'] = 'image/png'
cherrypy.response.headers['Last-Modified'] = self.last_modified(
self.build_time)
return data
def index(self, **kwargs):
'The / URL'
ua = cherrypy.request.headers.get('User-Agent', '').strip()
want_opds = \
cherrypy.request.headers.get('Stanza-Device-Name', 919) != 919 or \
cherrypy.request.headers.get('Want-OPDS-Catalog', 919) != 919 or \
ua.startswith('Stanza')
want_mobile = self.is_mobile_browser(ua)
if self.opts.develop and not want_mobile:
cherrypy.log('User agent: '+ua)
if want_opds:
return self.opds(version=0)
if want_mobile:
return self.mobile()
return self.browse_catalog()
def old(self, **kwargs):
return self.static('index.html').replace('{prefix}',
self.opts.url_prefix)
# Actually get content from the database {{{
def get_cover(self, id, thumbnail=False, thumb_width=60, thumb_height=80):
try:
cherrypy.response.headers['Content-Type'] = 'image/jpeg'
cherrypy.response.timeout = 3600
cover = self.db.cover(id, index_is_id=True)
if cover is None:
cover = self.default_cover
updated = self.build_time
else:
updated = self.db.cover_last_modified(id, index_is_id=True)
cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)
if thumbnail:
quality = tweaks['content_server_thumbnail_compression_quality']
if quality < 50:
quality = 50
elif quality > 99:
quality = 99
return generate_thumbnail(cover, width=thumb_width,
height=thumb_height, compression_quality=quality)[-1]
img = Image()
img.load(cover)
width, height = img.size
scaled, width, height = fit_image(width, height,
thumb_width if thumbnail else self.max_cover_width,
thumb_height if thumbnail else self.max_cover_height)
if not scaled:
return cover
return save_cover_data_to(img, 'img.jpg', return_data=True,
resize_to=(width, height))
except Exception as err:
import traceback
cherrypy.log.error('Failed to generate cover:')
            cherrypy.log.error(traceback.format_exc())
raise cherrypy.HTTPError(404, 'Failed to generate cover: %r'%err)
def get_metadata_as_opf(self, id_):
cherrypy.response.headers['Content-Type'] = \
'application/oebps-package+xml; charset=UTF-8'
mi = self.db.get_metadata(id_, index_is_id=True)
data = metadata_to_opf(mi)
cherrypy.response.timeout = 3600
cherrypy.response.headers['Last-Modified'] = \
self.last_modified(mi.last_modified)
return data
def get_format(self, id, format):
format = format.upper()
fm = self.db.format_metadata(id, format, allow_cache=False)
if not fm:
raise cherrypy.HTTPError(404, 'book: %d does not have format: %s'%(id, format))
update_metadata = format in {'MOBI', 'EPUB', 'AZW3'}
mi = newmi = self.db.get_metadata(
id, index_is_id=True, cover_as_data=True, get_cover=update_metadata)
cherrypy.response.headers['Last-Modified'] = \
self.last_modified(max(fm['mtime'], mi.last_modified))
fmt = self.db.format(id, format, index_is_id=True, as_file=True,
mode='rb')
if fmt is None:
raise cherrypy.HTTPError(404, 'book: %d does not have format: %s'%(id, format))
mt = guess_type('dummy.'+format.lower())[0]
if mt is None:
mt = 'application/octet-stream'
cherrypy.response.headers['Content-Type'] = mt
if format.lower() in plugboard_content_server_formats:
# Get any plugboards for the content server
plugboards = self.db.prefs.get('plugboards', {})
cpb = find_plugboard(plugboard_content_server_value,
format.lower(), plugboards)
if cpb:
# Transform the metadata via the plugboard
newmi = mi.deepcopy_metadata()
newmi.template_to_attribute(mi, cpb)
if update_metadata:
# Write the updated file
from calibre.ebooks.metadata.meta import set_metadata
set_metadata(fmt, newmi, format.lower())
fmt.seek(0)
fmt.seek(0, 2)
cherrypy.response.headers['Content-Length'] = fmt.tell()
fmt.seek(0)
au = authors_to_string(newmi.authors if newmi.authors else
[_('Unknown')])
title = newmi.title if newmi.title else _('Unknown')
fname = u'%s - %s_%s.%s'%(title[:30], au[:30], id, format.lower())
fname = ascii_filename(fname).replace('"', '_')
cherrypy.response.headers['Content-Disposition'] = \
b'attachment; filename="%s"'%fname
cherrypy.response.body = fmt
cherrypy.response.timeout = 3600
return fmt
# }}}
| gpl-3.0 |
TomBaxter/osf.io | osf/migrations/0024_migrate_subject_parents_to_parent.py | 28 | 3542 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.db import connection, migrations, models
from osf.models.validators import validate_subject_hierarchy_length
logger = logging.getLogger(__name__)
def add_custom_mapping_constraint(state, schema):
sql = """
ALTER TABLE osf_subject
ADD CONSTRAINT customs_must_be_mapped
CHECK (bepress_subject_id IS NOT NULL OR provider_id = %s);
"""
try:
osf_id = state.get_model('osf', 'preprintprovider').objects.get(_id='osf').id
except models.ObjectDoesNotExist:
# Allow test / local dev DBs to pass
logger.warn('Unable to create contraint - assuming test environment.')
pass
else:
with connection.cursor() as cursor:
cursor.execute(sql, [osf_id])
def remove_custom_mapping_constraint(*args):
sql = """
ALTER TABLE osf_subject
DROP CONSTRAINT IF EXISTS customs_must_be_mapped RESTRICT;
"""
with connection.cursor() as cursor:
cursor.execute(sql)
class Migration(migrations.Migration):
dependencies = [
('osf', '0023_merge_20170503_1947'),
]
operations = [
migrations.AddField(
model_name='subject',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=models.deletion.SET_NULL, related_name='children', to='osf.Subject', validators=[validate_subject_hierarchy_length]),
),
migrations.AddField(
model_name='subject',
name='provider',
field=models.ForeignKey(blank=True, null=True, on_delete=models.deletion.SET_NULL, to='osf.PreprintProvider', related_name='subjects')
),
migrations.AddField(
model_name='subject',
name='bepress_subject',
field=models.ForeignKey(blank=True, null=True, on_delete=models.deletion.CASCADE, to='osf.Subject', related_name='aliases')
),
migrations.RunSQL(
["""
UPDATE osf_subject
SET provider_id = (SELECT id FROM osf_preprintprovider WHERE _id = 'osf');
"""], ["""
UPDATE osf_subject
SET provider_id = NULL;
"""]
),
migrations.RunSQL(
["""
UPDATE osf_subject
SET parent_id=subquery.to_subject_id
FROM (SELECT from_subject_id, to_subject_id
FROM osf_subject_parents) AS subquery
WHERE osf_subject.id=subquery.from_subject_id;
"""], ["""
INSERT INTO osf_subject_parents (from_subject_id, to_subject_id)
SELECT id, parent_id FROM osf_subject
WHERE parent_id IS NOT NULL;
"""]
),
migrations.RunPython(
add_custom_mapping_constraint, remove_custom_mapping_constraint
),
migrations.RemoveField(
model_name='subject',
name='parents'
),
migrations.AlterField(
model_name='subject',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=models.deletion.SET_NULL, related_name='children', to='osf.Subject', validators=[validate_subject_hierarchy_length]),
),
migrations.AlterField(
model_name='subject',
name='provider',
field=models.ForeignKey(blank=False, null=False, on_delete=models.deletion.CASCADE, to='osf.PreprintProvider', related_name='subjects')
),
]
| apache-2.0 |
iuliat/nova | nova/tests/unit/scheduler/filters/test_retry_filters.py | 68 | 1929 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.scheduler.filters import retry_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestRetryFilter(test.NoDBTestCase):
def setUp(self):
super(TestRetryFilter, self).setUp()
self.filt_cls = retry_filter.RetryFilter()
def test_retry_filter_disabled(self):
# Test case where retry/re-scheduling is disabled.
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_retry_filter_pass(self):
# Node not previously tried.
host = fakes.FakeHostState('host1', 'nodeX', {})
retry = dict(num_attempts=2,
hosts=[['host1', 'node1'], # same host, different node
['host2', 'node2'], # different host and node
])
filter_properties = dict(retry=retry)
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_retry_filter_fail(self):
# Node was already tried.
host = fakes.FakeHostState('host1', 'node1', {})
retry = dict(num_attempts=1,
hosts=[['host1', 'node1']])
filter_properties = dict(retry=retry)
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
| apache-2.0 |
andjimrio/LTN | Application/service/item_services.py | 1 | 5173 | from collections import Counter
from django.utils import timezone
from django.shortcuts import get_object_or_404
from Application.models import Item, UserProfile, Section, Status
from Application.utilities.python_utilities import floor_log
from Application.service.profile_services import get_profile, get_keywords_by_user
from Application.service.section_services import get_sections_by_user
def create_item(**dict_item):
return Item.objects.get_or_create(**dict_item)
def get_item(item_id):
return get_object_or_404(Item, pk=item_id)
def exists_item_by_link(link):
return Item.objects.filter(link=link).exists()
def get_status_by_user_item(user_id, item_id):
return Status.objects.get_or_create(user_id=get_profile(user_id).id, item_id=item_id)
def get_last_items_by_user(user_id, unview=False):
if unview:
return UserProfile.objects.get(user__id=user_id).sections.all()\
.values('feeds__id', 'feeds__title', 'feeds__items__id', 'feeds__items__title',
'feeds__items__description', 'feeds__items__pubDate', 'feeds__items__image')\
.order_by('-feeds__items__pubDate')
else:
return UserProfile.objects.get(user__id=user_id).statuses.all().\
filter(view=False).\
values('item__feed_id', 'item__feed__title', 'item_id', 'item__title',
'item__description', 'item__pubDate', 'item__image').\
order_by('-item__pubDate')
def get_item_today_by_section(section_id, days=0, hours=0):
end_date = timezone.now()
start_date = end_date - timezone.timedelta(days=days, hours=hours)
return Section.objects.filter(id=section_id).filter(feeds__items__pubDate__range=[start_date, end_date])\
.values('feeds__items__id', 'feeds__items__title')
def get_item_similarity(item_id, limit, user_id):
more_results = Item.objects.get_more_like_this('article', item_id, limit). \
filter(statuses__user__user_id=user_id)\
.order_by('-pubDate')
return more_results
def get_item_query(query, profile_id):
results = Item.objects.filter(keywords__term__contains=query) \
.filter(feed__sections__user_id=profile_id)\
.order_by('-pubDate')
return results
def query_multifield_dict(dict_query, profile_id):
results = Item.objects.query_multifield_dict(dict_query) \
.filter(feed__sections__user_id=profile_id)\
.order_by('-pubDate')
return results
def stats_items(queryset):
stats = [x.pubDate.strftime("%m/%Y") for x in queryset]
return dict(Counter(stats))
def get_item_recommend(profile_id):
results = Item.objects.filter(feed__sections__user_id=profile_id)\
.exclude(statuses__view=True)\
.filter(keywords__in=get_keywords_by_user(profile_id))\
.order_by('-pubDate')
return results
def get_item_saved(user_id):
return Item.objects.filter(statuses__user__user_id=user_id)\
.filter(statuses__saves=True)\
.order_by('-pubDate')
def get_summary(user_id):
summary_keywords = dict()
for section in get_sections_by_user(user_id):
section_summary_keywords = SectionSummaryKeywords(section.title)
for item in get_item_today_by_section(section.id, days=1):
keywords = get_item(item['feeds__items__id']).keywords.all()
if len(keywords) > 0:
section_summary_keywords.add_keyword(keywords, item['feeds__items__id'], item['feeds__items__title'])
summary_keywords[section.title] = section_summary_keywords.most_common()
return summary_keywords
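# Illustrative result shape (an added note, not from the original source): for
# a user with a 'Sports' section, get_summary(user_id) returns a dict such as
#   {'Sports': [<KeywordCounter: olympics, 3 items>, ...]}
# keyed by section title, each value holding the most frequent KeywordCounter
# objects built from the articles of the last 24 hours.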
class SectionSummaryKeywords:
def __init__(self, section_title):
self.section = section_title
self.keywords_counters = dict()
self.counts_counters = Counter()
def add_keyword(self, keywords, item_id, item_title):
exists = False
keyword = keywords[0]
for key in keywords:
if key in self.keywords_counters:
exists = True
keyword = key
break
if exists:
self.keywords_counters[keyword].update(item_id, item_title)
else:
keyword_counter = KeywordCounter(keyword, item_id, item_title)
self.keywords_counters[keyword] = keyword_counter
self.counts_counters[keyword] += 1
def most_common(self, number=None):
        if not number:
            if self.counts_counters:
                number = floor_log(len(self.counts_counters))
            else:
                number = 0
return [self.keywords_counters[keyword[0]] for keyword in self.counts_counters.most_common(number)]
def __str__(self):
return "SSK: {} - {}".format(self.section, len(self.keywords_counters))
class KeywordCounter:
def __init__(self, keyword, item_id, item_title):
self.keyword = keyword
self.counts = 1
self.sample_title = item_title
self.items = dict()
self.items[item_id] = item_title
def update(self, item_id, item_title):
self.counts += 1
self.items[item_id] = item_title
def __str__(self):
return "KC: {} - {}".format(self.keyword, self.counts) | mit |
devlaith/DEVLAITH-HL | tg/tg-test.py | 199 | 1450 | import tgl
import pprint
from functools import partial
our_id = 0
pp = pprint.PrettyPrinter(indent=4)
binlog_done = False;
def on_binlog_replay_end():
    global binlog_done
    binlog_done = True;
def on_get_difference_end():
pass
def on_our_id(id):
    global our_id
    our_id = id
    return "Set ID: " + str(our_id)
def msg_cb(success, msg):
pp.pprint(success)
pp.pprint(msg)
HISTORY_QUERY_SIZE = 100
def history_cb(msg_list, peer, success, msgs):
print(len(msgs))
msg_list.extend(msgs)
print(len(msg_list))
if len(msgs) == HISTORY_QUERY_SIZE:
tgl.get_history(peer, len(msg_list), HISTORY_QUERY_SIZE, partial(history_cb, msg_list, peer));
def cb(success):
print(success)
def on_msg_receive(msg):
if msg.out and not binlog_done:
return;
if msg.dest.id == our_id: # direct message
peer = msg.src
else: # chatroom
peer = msg.dest
pp.pprint(msg)
if msg.text.startswith("!ping"):
peer.send_msg("PONG! google.com", preview=False, reply=msg.id)
def on_secret_chat_update(peer, types):
return "on_secret_chat_update"
def on_user_update():
pass
def on_chat_update():
pass
# Set callbacks
tgl.set_on_binlog_replay_end(on_binlog_replay_end)
tgl.set_on_get_difference_end(on_get_difference_end)
tgl.set_on_our_id(on_our_id)
tgl.set_on_msg_receive(on_msg_receive)
tgl.set_on_secret_chat_update(on_secret_chat_update)
tgl.set_on_user_update(on_user_update)
tgl.set_on_chat_update(on_chat_update)
| gpl-2.0 |
bykof/billomapy | billomapy/resources.py | 1 | 4081 | """
KEY and DATA_KEYS FOR THE API
"""
PROPERTY_VALUES = '-property-values'
PROPERTY_VALUE = '-property-value'
TAGS = '-tags'
TAG = '-tag'
ITEMS = '-items'
ITEM = '-item'
COMMENTS = '-comments'
COMMENT = '-comment'
PAYMENTS = '-payments'
PAYMENT = '-payment'
EMAIL_RECEIVERS = '-email-receivers'
EMAIL_RECEIVER = '-email-receiver'
DOCUMENTS = '-documents'
DOCUMENT = '-document'
CLIENTS = 'clients'
CLIENT = 'client'
CLIENT_PROPERTIES = CLIENT + PROPERTY_VALUES
CLIENT_PROPERTY = CLIENT + PROPERTY_VALUE
CLIENT_TAGS = CLIENT + TAGS
CLIENT_TAG = CLIENT + TAG
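# For illustration (an added note, not part of the original module): the
# composed constants expand to the Billomat resource keys, e.g.
#   CLIENT_PROPERTIES == 'client-property-values'
#   CLIENT_TAGS == 'client-tags'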
CONTACTS = 'contacts'
CONTACT = 'contact'
SUPPLIERS = 'suppliers'
SUPPLIER = 'supplier'
SUPPLIER_PROPERTIES = SUPPLIER + PROPERTY_VALUES
SUPPLIER_PROPERTY = SUPPLIER + PROPERTY_VALUE
SUPPLIER_TAGS = SUPPLIER + TAGS
SUPPLIER_TAG = SUPPLIER + TAG
ARTICLES = 'articles'
ARTICLE = 'article'
ARTICLE_PROPERTIES = ARTICLE + PROPERTY_VALUES
ARTICLE_PROPERTY = ARTICLE + PROPERTY_VALUE
ARTICLE_TAGS = ARTICLE + TAGS
ARTICLE_TAG = ARTICLE + TAG
UNITS = 'units'
UNIT = 'unit'
INVOICES = 'invoices'
INVOICE = 'invoice'
INVOICE_ITEMS = INVOICE + ITEMS
INVOICE_ITEM = INVOICE + ITEM
INVOICE_COMMENTS = INVOICE + COMMENTS
INVOICE_COMMENT = INVOICE + COMMENT
INVOICE_PAYMENTS = INVOICE + PAYMENTS
INVOICE_PAYMENT = INVOICE + PAYMENT
INVOICE_TAGS = INVOICE + TAGS
INVOICE_TAG = INVOICE + TAG
RECURRINGS = 'recurrings'
RECURRING = 'recurring'
RECURRING_ITEMS = RECURRING + ITEMS
RECURRING_ITEM = RECURRING + ITEM
RECURRING_TAGS = RECURRING + TAGS
RECURRING_TAG = RECURRING + TAG
RECURRING_EMAIL_RECEIVERS = RECURRING + EMAIL_RECEIVERS
RECURRING_EMAIL_RECEIVER = RECURRING + EMAIL_RECEIVER
INCOMINGS = 'incomings'
INCOMING = 'incoming'
INCOMING_COMMENTS = INCOMING + COMMENTS
INCOMING_COMMENT = INCOMING + COMMENT
INCOMING_PAYMENTS = INCOMING + PAYMENTS
INCOMING_PAYMENT = INCOMING + PAYMENT
INCOMING_PROPERTIES = INCOMING + PROPERTY_VALUES
INCOMING_PROPERTY = INCOMING + PROPERTY_VALUE
INCOMING_TAGS = INCOMING + TAGS
INCOMING_TAG = INCOMING + TAG
INBOX = 'inbox'
INBOX_DOCUMENTS = INBOX + DOCUMENTS
INBOX_DOCUMENT = INBOX + DOCUMENT
OFFERS = 'offers'
OFFER = 'offer'
OFFER_ITEMS = OFFER + ITEMS
OFFER_ITEM = OFFER + ITEM
OFFER_COMMENTS = OFFER + COMMENTS
OFFER_COMMENT = OFFER + COMMENT
OFFER_TAGS = OFFER + TAGS
OFFER_TAG = OFFER + TAG
CREDIT_NOTES = 'credit-notes'
CREDIT_NOTE = 'credit-note'
CREDIT_NOTE_ITEMS = CREDIT_NOTE + ITEMS
CREDIT_NOTE_ITEM = CREDIT_NOTE + ITEM
CREDIT_NOTE_COMMENTS = CREDIT_NOTE + COMMENTS
CREDIT_NOTE_COMMENT = CREDIT_NOTE + COMMENT
CREDIT_NOTE_PAYMENTS = CREDIT_NOTE + PAYMENTS
CREDIT_NOTE_PAYMENT = CREDIT_NOTE + PAYMENT
CREDIT_NOTE_TAGS = CREDIT_NOTE + TAGS
CREDIT_NOTE_TAG = CREDIT_NOTE + TAG
CONFIRMATIONS = 'confirmations'
CONFIRMATION = 'confirmation'
CONFIRMATION_ITEMS = CONFIRMATION + ITEMS
CONFIRMATION_ITEM = CONFIRMATION + ITEM
CONFIRMATION_COMMENTS = CONFIRMATION + COMMENTS
CONFIRMATION_COMMENT = CONFIRMATION + COMMENT
CONFIRMATION_TAGS = CONFIRMATION + TAGS
CONFIRMATION_TAG = CONFIRMATION + TAG
REMINDERS = 'reminders'
REMINDER = 'reminder'
REMINDER_ITEMS = REMINDER + ITEMS
REMINDER_ITEM = REMINDER + ITEM
REMINDER_TAGS = REMINDER + TAGS
REMINDER_TAG = REMINDER + TAG
DELIVERY_NOTES = 'delivery-notes'
DELIVERY_NOTE = 'delivery-note'
DELIVERY_NOTE_ITEMS = DELIVERY_NOTE + ITEMS
DELIVERY_NOTE_ITEM = DELIVERY_NOTE + ITEM
DELIVERY_NOTE_COMMENTS = DELIVERY_NOTE + COMMENTS
DELIVERY_NOTE_COMMENT = DELIVERY_NOTE + COMMENT
DELIVERY_NOTE_TAGS = DELIVERY_NOTE + TAGS
DELIVERY_NOTE_TAG = DELIVERY_NOTE + TAG
LETTERS = 'letters'
LETTER = 'letter'
LETTER_COMMENTS = LETTER + COMMENTS
LETTER_COMMENT = LETTER + COMMENT
LETTER_TAGS = LETTER + TAGS
LETTER_TAG = LETTER + TAG
TEMPLATES = 'templates'
TEMPLATE = 'template'
EMAIL_TEMPLATES = 'email-templates'
EMAIL_TEMPLATE = 'email-template'
USER = 'users'
USERS = 'users'
"""
COMMANDS for the API
"""
COMPLETE = 'complete'
PDF = 'pdf'
UPLOAD_SIGNATURE = 'upload-signature'
EMAIL = 'email'
CANCEL = 'cancel'
UNCANCEL = 'uncancel'
WIN = 'win'
LOSE = 'lose'
CLEAR = 'clear'
UNCLEAR = 'unclear'
| apache-2.0 |
BjerknesClimateDataCentre/QuinCe | external_scripts/NRT/salinity_data/prepare_salinity.py | 2 | 2681 | # Prepare a cut-down version of the World Ocean Atlas 2018 salinity
# data to use with the AddSalinityPreprocessor.
# Input files are 0.25° seasonal files for the years 2005-2017,
# available from https://www.nodc.noaa.gov/cgi-bin/OC5/woa18/woa18.pl
# Files are:
#
# woa18_A5B7_s13_04.nc - Winter (DJF) = Season 1
# woa18_A5B7_s14_04.nc - Spring (MAM) = Season 2
# woa18_A5B7_s15_04.nc - Summer (JJA) = Season 3
# woa18_A5B7_s16_04.nc - Autumn (SON) = Season 4
#
#
# Output is a single netCDF file, containing the surface data for the full grid
# and four time steps.
import os
import netCDF4
WINTER_FILE = "woa18_A5B7_s13_04.nc"
SPRING_FILE = "woa18_A5B7_s14_04.nc"
SUMMER_FILE = "woa18_A5B7_s15_04.nc"
AUTUMN_FILE = "woa18_A5B7_s16_04.nc"
IN_VAR = "s_an"
OUTPUT_FILE = "woa18_seasonal_surface_salinity.nc"
def main():
if not init_check():
print("Initialisation check failed.")
exit()
init_output_file()
add_season(WINTER_FILE, 0)
add_season(SPRING_FILE, 1)
add_season(SUMMER_FILE, 2)
add_season(AUTUMN_FILE, 3)
# Initialisation check
def init_check():
check_result = True
if not file_exists(WINTER_FILE):
check_result = False
if not file_exists(SPRING_FILE):
check_result = False
if not file_exists(SUMMER_FILE):
check_result = False
 if not file_exists(AUTUMN_FILE):
check_result = False
return check_result
# See if a file exists
def file_exists(file):
exists = True
if not os.path.isfile(file):
print("Missing file %s" % file)
exists = False
return exists
def init_output_file():
# Get spatial dimensions from input file
nc = netCDF4.Dataset(WINTER_FILE, mode="r")
lons = nc.variables["lon"][:]
lats = nc.variables["lat"][:]
nc.close()
nc = netCDF4.Dataset(OUTPUT_FILE, format="NETCDF4_CLASSIC", mode="w")
londim = nc.createDimension("lon", len(lons))
lonvar = nc.createVariable("lon", "f", ("lon"), fill_value=-999)
lonvar.units = "degrees_east"
latdim = nc.createDimension("lat", len(lats))
latvar = nc.createVariable("lat", "f", ("lat"), fill_value=-999)
latvar.units = "degrees_north"
timedim = nc.createDimension("time", 4)
timevar = nc.createVariable("time", "i", ("time"), fill_value = -1)
timevar.units = "season"
timevar.long_name = "season"
salvar = nc.createVariable("salinity", "d", ("time", "lat", "lon"), fill_value=-999)
lonvar[:] = lons
latvar[:] = lats
timevar[:] = [1,2,3,4]
nc.close()
def add_season(season_file, season):
nc = netCDF4.Dataset(season_file, mode="r")
values = nc.variables[IN_VAR][0,0,:,:]
nc.close()
nc = netCDF4.Dataset(OUTPUT_FILE, mode="a")
nc.variables["salinity"][season,:,:] = values
nc.close()
if __name__ == '__main__':
main()
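# Illustrative check (an added sketch, not part of the original script): after
# a run, the result can be inspected with netCDF4, e.g.
#   nc = netCDF4.Dataset(OUTPUT_FILE)
#   print(nc.variables["salinity"].shape)   # expected (4, n_lat, n_lon)
#   nc.close()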
| gpl-3.0 |
balloob/home-assistant | homeassistant/components/venstar/climate.py | 16 | 11240 | """Support for Venstar WiFi Thermostats."""
import logging
from venstarcolortouch import VenstarColorTouch
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
FAN_AUTO,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_HUMIDITY,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_HOST,
CONF_PASSWORD,
CONF_PIN,
CONF_SSL,
CONF_TIMEOUT,
CONF_USERNAME,
PRECISION_HALVES,
STATE_ON,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_FAN_STATE = "fan_state"
ATTR_HVAC_STATE = "hvac_mode"
CONF_HUMIDIFIER = "humidifier"
DEFAULT_SSL = False
VALID_FAN_STATES = [STATE_ON, HVAC_MODE_AUTO]
VALID_THERMOSTAT_MODES = [HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_OFF, HVAC_MODE_AUTO]
HOLD_MODE_OFF = "off"
HOLD_MODE_TEMPERATURE = "temperature"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_HUMIDIFIER, default=True): cv.boolean,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=5): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PIN): cv.string,
}
)
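# Example configuration.yaml entry (an added sketch; values are placeholders,
# adjust to your own thermostat):
#   climate:
#     - platform: venstar
#       host: 192.168.1.20
#       username: admin
#       password: secret
#       ssl: false
#       timeout: 5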
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Venstar thermostat."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
pin = config.get(CONF_PIN)
host = config.get(CONF_HOST)
timeout = config.get(CONF_TIMEOUT)
humidifier = config.get(CONF_HUMIDIFIER)
protocol = "https" if config[CONF_SSL] else "http"
client = VenstarColorTouch(
addr=host,
timeout=timeout,
user=username,
password=password,
pin=pin,
proto=protocol,
)
add_entities([VenstarThermostat(client, humidifier)], True)
class VenstarThermostat(ClimateEntity):
"""Representation of a Venstar thermostat."""
def __init__(self, client, humidifier):
"""Initialize the thermostat."""
self._client = client
self._humidifier = humidifier
self._mode_map = {
HVAC_MODE_HEAT: self._client.MODE_HEAT,
HVAC_MODE_COOL: self._client.MODE_COOL,
HVAC_MODE_AUTO: self._client.MODE_AUTO,
}
def update(self):
"""Update the data from the thermostat."""
info_success = self._client.update_info()
sensor_success = self._client.update_sensors()
if not info_success or not sensor_success:
_LOGGER.error("Failed to update data")
@property
def supported_features(self):
"""Return the list of supported features."""
features = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE | SUPPORT_PRESET_MODE
if self._client.mode == self._client.MODE_AUTO:
features |= SUPPORT_TARGET_TEMPERATURE_RANGE
if self._humidifier and hasattr(self._client, "hum_active"):
features |= SUPPORT_TARGET_HUMIDITY
return features
@property
def name(self):
"""Return the name of the thermostat."""
return self._client.name
@property
def precision(self):
"""Return the precision of the system.
Venstar temperature values are passed back and forth in the
API in C or F, with half-degree accuracy.
"""
return PRECISION_HALVES
@property
def temperature_unit(self):
"""Return the unit of measurement, as defined by the API."""
if self._client.tempunits == self._client.TEMPUNITS_F:
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return VALID_FAN_STATES
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return VALID_THERMOSTAT_MODES
@property
def current_temperature(self):
"""Return the current temperature."""
return self._client.get_indoor_temp()
@property
def current_humidity(self):
"""Return the current humidity."""
return self._client.get_indoor_humidity()
@property
def hvac_mode(self):
"""Return current operation mode ie. heat, cool, auto."""
if self._client.mode == self._client.MODE_HEAT:
return HVAC_MODE_HEAT
if self._client.mode == self._client.MODE_COOL:
return HVAC_MODE_COOL
if self._client.mode == self._client.MODE_AUTO:
return HVAC_MODE_AUTO
return HVAC_MODE_OFF
@property
def hvac_action(self):
"""Return current operation mode ie. heat, cool, auto."""
if self._client.state == self._client.STATE_IDLE:
return CURRENT_HVAC_IDLE
if self._client.state == self._client.STATE_HEATING:
return CURRENT_HVAC_HEAT
if self._client.state == self._client.STATE_COOLING:
return CURRENT_HVAC_COOL
return CURRENT_HVAC_OFF
@property
def fan_mode(self):
"""Return the current fan mode."""
if self._client.fan == self._client.FAN_ON:
return FAN_ON
return FAN_AUTO
@property
def device_state_attributes(self):
"""Return the optional state attributes."""
return {
ATTR_FAN_STATE: self._client.fanstate,
ATTR_HVAC_STATE: self._client.state,
}
@property
def target_temperature(self):
"""Return the target temperature we try to reach."""
if self._client.mode == self._client.MODE_HEAT:
return self._client.heattemp
if self._client.mode == self._client.MODE_COOL:
return self._client.cooltemp
return None
@property
def target_temperature_low(self):
"""Return the lower bound temp if auto mode is on."""
if self._client.mode == self._client.MODE_AUTO:
return self._client.heattemp
return None
@property
def target_temperature_high(self):
"""Return the upper bound temp if auto mode is on."""
if self._client.mode == self._client.MODE_AUTO:
return self._client.cooltemp
return None
@property
def target_humidity(self):
"""Return the humidity we try to reach."""
return self._client.hum_setpoint
@property
def min_humidity(self):
"""Return the minimum humidity. Hardcoded to 0 in API."""
return 0
@property
def max_humidity(self):
"""Return the maximum humidity. Hardcoded to 60 in API."""
return 60
@property
def preset_mode(self):
"""Return current preset."""
if self._client.away:
return PRESET_AWAY
if self._client.schedule == 0:
return HOLD_MODE_TEMPERATURE
return PRESET_NONE
@property
def preset_modes(self):
"""Return valid preset modes."""
return [PRESET_NONE, PRESET_AWAY, HOLD_MODE_TEMPERATURE]
def _set_operation_mode(self, operation_mode):
"""Change the operation mode (internal)."""
if operation_mode == HVAC_MODE_HEAT:
success = self._client.set_mode(self._client.MODE_HEAT)
elif operation_mode == HVAC_MODE_COOL:
success = self._client.set_mode(self._client.MODE_COOL)
elif operation_mode == HVAC_MODE_AUTO:
success = self._client.set_mode(self._client.MODE_AUTO)
else:
success = self._client.set_mode(self._client.MODE_OFF)
if not success:
_LOGGER.error("Failed to change the operation mode")
return success
def set_temperature(self, **kwargs):
"""Set a new target temperature."""
set_temp = True
operation_mode = kwargs.get(ATTR_HVAC_MODE)
temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
temperature = kwargs.get(ATTR_TEMPERATURE)
if operation_mode and self._mode_map.get(operation_mode) != self._client.mode:
set_temp = self._set_operation_mode(operation_mode)
if set_temp:
if (
self._mode_map.get(operation_mode, self._client.mode)
== self._client.MODE_HEAT
):
success = self._client.set_setpoints(temperature, self._client.cooltemp)
elif (
self._mode_map.get(operation_mode, self._client.mode)
== self._client.MODE_COOL
):
success = self._client.set_setpoints(self._client.heattemp, temperature)
elif (
self._mode_map.get(operation_mode, self._client.mode)
== self._client.MODE_AUTO
):
success = self._client.set_setpoints(temp_low, temp_high)
else:
success = False
_LOGGER.error(
"The thermostat is currently not in a mode "
"that supports target temperature: %s",
operation_mode,
)
            if not success:
                _LOGGER.error("Failed to change the temperature")
def set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
if fan_mode == STATE_ON:
success = self._client.set_fan(self._client.FAN_ON)
else:
success = self._client.set_fan(self._client.FAN_AUTO)
if not success:
_LOGGER.error("Failed to change the fan mode")
def set_hvac_mode(self, hvac_mode):
"""Set new target operation mode."""
self._set_operation_mode(hvac_mode)
def set_humidity(self, humidity):
"""Set new target humidity."""
success = self._client.set_hum_setpoint(humidity)
if not success:
_LOGGER.error("Failed to change the target humidity level")
def set_preset_mode(self, preset_mode):
"""Set the hold mode."""
if preset_mode == PRESET_AWAY:
success = self._client.set_away(self._client.AWAY_AWAY)
elif preset_mode == HOLD_MODE_TEMPERATURE:
success = self._client.set_away(self._client.AWAY_HOME)
success = success and self._client.set_schedule(0)
elif preset_mode == PRESET_NONE:
success = self._client.set_away(self._client.AWAY_HOME)
success = success and self._client.set_schedule(1)
else:
_LOGGER.error("Unknown hold mode: %s", preset_mode)
success = False
if not success:
_LOGGER.error("Failed to change the schedule/hold state")
| apache-2.0 |
yamila-moreno/nikola | nikola/data/themes/base/messages/messages_da.py | 6 | 1441 | # -*- encoding:utf-8 -*-
from __future__ import unicode_literals
MESSAGES = {
"%d min remaining to read": "%d min. tilbage at læse",
"(active)": "",
"Also available in:": "Fås også i:",
"Archive": "Arkiv",
"Categories": "Kategorier",
"Comments": "Kommentarer",
"LANGUAGE": "Dansk",
"Languages:": "Sprog:",
"More posts about %s": "Yderligere indlæg om %s",
"Newer posts": "Nyere indlæg",
"Next post": "Næste indlæg",
"No posts found.": "Søgningen gav ingen resultater.",
"Nothing found.": "Søgningen gav ingen resultater.",
"Older posts": "Ældre indlæg",
"Original site": "Oprindeligt hjemmeside",
"Posted:": "Opslået:",
"Posts about %s": "Indlæg om %s",
"Posts for year %s": "Indlæg for %s",
"Posts for {month} {day}, {year}": "Indlæs for {month} {day}, {year}",
"Posts for {month} {year}": "Indlæg for {month} {year}",
"Previous post": "Tidligere indlæg",
"Publication date": "Udgivelsesdato",
"RSS feed": "RSS-nyhedskilde",
"Read in English": "Læs på dansk",
"Read more": "Læs mere",
"Skip to main content": "Hop direkte til hovedindhold",
"Source": "Kilde",
"Subcategories:": "",
"Tags and Categories": "Nøgleord og kategorier",
"Tags": "Nøgleord",
"Write your page here.": "",
"Write your post here.": "",
"old posts, page %d": "gamle indlæg, side %d",
"page %d": "side %d",
}
| mit |
mhvk/numpy | numpy/core/tests/test_memmap.py | 5 | 7469 | import sys
import os
import mmap
import pytest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryFile
from numpy import (
memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
from numpy import arange, allclose, asarray
from numpy.testing import (
assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY,
break_cycles
)
class TestMemmap:
def setup(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
self.shape = (3, 4)
self.dtype = 'float32'
self.data = arange(12, dtype=self.dtype)
self.data.resize(self.shape)
def teardown(self):
self.tmpfp.close()
self.data = None
if IS_PYPY:
break_cycles()
break_cycles()
def test_roundtrip(self):
# Write data to file
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp # Test __del__ machinery, which handles cleanup
# Read data back from file
newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
shape=self.shape)
assert_(allclose(self.data, newfp))
assert_array_equal(self.data, newfp)
assert_equal(newfp.flags.writeable, False)
def test_open_with_filename(self, tmp_path):
tmpname = tmp_path / 'mmap'
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp
def test_unnamed_file(self):
with TemporaryFile() as f:
fp = memmap(f, dtype=self.dtype, shape=self.shape)
del fp
def test_attributes(self):
offset = 1
mode = "w+"
fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
shape=self.shape, offset=offset)
assert_equal(offset, fp.offset)
assert_equal(mode, fp.mode)
del fp
def test_filename(self, tmp_path):
tmpname = tmp_path / "mmap"
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
abspath = Path(os.path.abspath(tmpname))
fp[:] = self.data[:]
assert_equal(abspath, fp.filename)
b = fp[:1]
assert_equal(abspath, b.filename)
del b
del fp
def test_path(self, tmp_path):
tmpname = tmp_path / "mmap"
fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
shape=self.shape)
# os.path.realpath does not resolve symlinks on Windows
# see: https://bugs.python.org/issue9949
# use Path.resolve, just as memmap class does internally
abspath = str(Path(tmpname).resolve())
fp[:] = self.data[:]
assert_equal(abspath, str(fp.filename.resolve()))
b = fp[:1]
assert_equal(abspath, str(b.filename.resolve()))
del b
del fp
def test_filename_fileobj(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
shape=self.shape)
assert_equal(fp.filename, self.tmpfp.name)
@pytest.mark.skipif(sys.platform == 'gnu0',
reason="Known to fail on hurd")
def test_flush(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
assert_equal(fp[0], self.data[0])
fp.flush()
def test_del(self):
# Make sure a view does not delete the underlying mmap
fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp_base[0] = 5
fp_view = fp_base[0:1]
assert_equal(fp_view[0], 5)
del fp_view
# Should still be able to access and assign values after
# deleting the view
assert_equal(fp_base[0], 5)
fp_base[0] = 6
assert_equal(fp_base[0], 6)
def test_arithmetic_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = (fp + 10)
if isinstance(tmp, memmap):
assert_(tmp._mmap is not fp._mmap)
def test_indexing_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = fp[(1, 2), (2, 3)]
if isinstance(tmp, memmap):
assert_(tmp._mmap is not fp._mmap)
def test_slicing_keeps_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
assert_(fp[:2, :2]._mmap is fp._mmap)
def test_view(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
new1 = fp.view()
new2 = new1.view()
assert_(new1.base is fp)
assert_(new2.base is fp)
new_array = asarray(fp)
assert_(new_array.base is fp)
def test_ufunc_return_ndarray(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
with suppress_warnings() as sup:
sup.filter(FutureWarning, "np.average currently does not preserve")
for unary_op in [sum, average, product]:
result = unary_op(fp)
assert_(isscalar(result))
assert_(result.__class__ is self.data[0, 0].__class__)
assert_(unary_op(fp, axis=0).__class__ is ndarray)
assert_(unary_op(fp, axis=1).__class__ is ndarray)
for binary_op in [add, subtract, multiply]:
assert_(binary_op(fp, self.data).__class__ is ndarray)
assert_(binary_op(self.data, fp).__class__ is ndarray)
assert_(binary_op(fp, fp).__class__ is ndarray)
fp += 1
assert(fp.__class__ is memmap)
add(fp, 1, out=fp)
assert(fp.__class__ is memmap)
def test_getitem(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
assert_(fp[1:, :-1].__class__ is memmap)
# Fancy indexing returns a copy that is not memmapped
assert_(fp[[0, 1]].__class__ is ndarray)
def test_memmap_subclass(self):
class MemmapSubClass(memmap):
pass
fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
# We keep previous behavior for subclasses of memmap, i.e. the
# ufunc and __getitem__ output is never turned into a ndarray
assert_(sum(fp, axis=0).__class__ is MemmapSubClass)
assert_(sum(fp).__class__ is MemmapSubClass)
assert_(fp[1:, :-1].__class__ is MemmapSubClass)
assert(fp[[0, 1]].__class__ is MemmapSubClass)
def test_mmap_offset_greater_than_allocation_granularity(self):
size = 5 * mmap.ALLOCATIONGRANULARITY
offset = mmap.ALLOCATIONGRANULARITY + 1
fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
assert_(fp.offset == offset)
def test_no_shape(self):
self.tmpfp.write(b'a'*16)
mm = memmap(self.tmpfp, dtype='float64')
assert_equal(mm.shape, (2,))
def test_empty_array(self):
# gh-12653
with pytest.raises(ValueError, match='empty file'):
memmap(self.tmpfp, shape=(0,4), mode='w+')
self.tmpfp.write(b'\0')
# ok now the file is not empty
memmap(self.tmpfp, shape=(0,4), mode='w+')
| bsd-3-clause |
ychen820/microblog | flask/lib/python2.7/site-packages/openid/server/server.py | 142 | 65667 | # -*- test-case-name: openid.test.test_server -*-
"""OpenID server protocol and logic.
Overview
========
An OpenID server must perform three tasks:
1. Examine the incoming request to determine its nature and validity.
2. Make a decision about how to respond to this request.
3. Format the response according to the protocol.
The first and last of these tasks may be performed by
the L{decodeRequest<Server.decodeRequest>} and
L{encodeResponse<Server.encodeResponse>} methods of the
L{Server} object. Who gets to do the intermediate task -- deciding
how to respond to the request -- will depend on what type of request it
is.
If it's a request to authenticate a user (a X{C{checkid_setup}} or
X{C{checkid_immediate}} request), you need to decide if you will assert
that this user may claim the identity in question. Exactly how you do
that is a matter of application policy, but it generally involves making
sure the user has an account with your system and is logged in, checking
to see if that identity is hers to claim, and verifying with the user that
she does consent to releasing that information to the party making the
request.
Examine the properties of the L{CheckIDRequest} object, optionally
check L{CheckIDRequest.returnToVerified}, and when you've come
to a decision, form a response by calling L{CheckIDRequest.answer}.
Other types of requests relate to establishing associations between client
and server and verifying the authenticity of previous communications.
L{Server} contains all the logic and data necessary to respond to
such requests; just pass the request to L{Server.handleRequest}.
OpenID Extensions
=================
Do you want to provide other information for your users
in addition to authentication? Version 2.0 of the OpenID
protocol allows consumers to add extensions to their requests.
For example, with sites using the U{Simple Registration
Extension<http://openid.net/specs/openid-simple-registration-extension-1_0.html>},
a user can agree to have their nickname and e-mail address sent to a
site when they sign up.
Since extensions do not change the way OpenID authentication works,
code to handle extension requests may be completely separate from the
L{OpenIDRequest} class here. But you'll likely want data sent back by
your extension to be signed. L{OpenIDResponse} provides methods with
which you can add data to it which can be signed with the other data in
the OpenID signature.
For example::
# when request is a checkid_* request
response = request.answer(True)
    # this will add a signed 'openid.sreg.timezone' parameter to the response
# as well as a namespace declaration for the openid.sreg namespace
response.fields.setArg('http://openid.net/sreg/1.0', 'timezone', 'America/Los_Angeles')
There are helper modules for a number of extensions, including
L{Attribute Exchange<openid.extensions.ax>},
L{PAPE<openid.extensions.pape>}, and
L{Simple Registration<openid.extensions.sreg>} in the L{openid.extensions}
package.
Stores
======
The OpenID server needs to maintain state between requests in order
to function. Its mechanism for doing this is called a store. The
store interface is defined in C{L{openid.store.interface.OpenIDStore}}.
Additionally, several concrete store implementations are provided, so that
most sites won't need to implement a custom store. For a store backed
by flat files on disk, see C{L{openid.store.filestore.FileOpenIDStore}}.
For stores based on MySQL or SQLite, see the C{L{openid.store.sqlstore}}
module.
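For example, a server backed by the file store might be constructed like
this (the store path and endpoint URL here are only illustrative)::
    from openid.store.filestore import FileOpenIDStore
    store = FileOpenIDStore('/var/lib/example/openid-store')
    oserver = Server(store, 'http://example.com/openid/endpoint')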
Upgrading
=========
From 1.0 to 1.1
---------------
The keys by which a server looks up associations in its store have changed
in version 1.2 of this library. If your store has entries created from
version 1.0 code, you should empty it.
From 1.1 to 2.0
---------------
One of the additions to the OpenID protocol was a specified nonce
format for one-way nonces. As a result, the nonce table in the store
has changed. You'll need to run contrib/upgrade-store-1.1-to-2.0 to
upgrade your store, or you'll encounter errors about the wrong number
of columns in the oid_nonces table.
If you've written your own custom store or code that interacts
directly with it, you'll need to review the change notes in
L{openid.store.interface}.
@group Requests: OpenIDRequest, AssociateRequest, CheckIDRequest,
CheckAuthRequest
@group Responses: OpenIDResponse
@group HTTP Codes: HTTP_OK, HTTP_REDIRECT, HTTP_ERROR
@group Response Encodings: ENCODE_KVFORM, ENCODE_HTML_FORM, ENCODE_URL
"""
import time, warnings
from copy import deepcopy
from openid import cryptutil
from openid import oidutil
from openid import kvform
from openid.dh import DiffieHellman
from openid.store.nonce import mkNonce
from openid.server.trustroot import TrustRoot, verifyReturnTo
from openid.association import Association, default_negotiator, getSecretSize
from openid.message import Message, InvalidOpenIDNamespace, \
OPENID_NS, OPENID2_NS, IDENTIFIER_SELECT, OPENID1_URL_LIMIT
from openid.urinorm import urinorm
HTTP_OK = 200
HTTP_REDIRECT = 302
HTTP_ERROR = 400
BROWSER_REQUEST_MODES = ['checkid_setup', 'checkid_immediate']
ENCODE_KVFORM = ('kvform',)
ENCODE_URL = ('URL/redirect',)
ENCODE_HTML_FORM = ('HTML form',)
UNUSED = None
class OpenIDRequest(object):
"""I represent an incoming OpenID request.
@cvar mode: the C{X{openid.mode}} of this request.
@type mode: str
"""
mode = None
class CheckAuthRequest(OpenIDRequest):
"""A request to verify the validity of a previous response.
@cvar mode: "X{C{check_authentication}}"
@type mode: str
@ivar assoc_handle: The X{association handle} the response was signed with.
@type assoc_handle: str
@ivar signed: The message with the signature which wants checking.
@type signed: L{Message}
@ivar invalidate_handle: An X{association handle} the client is asking
about the validity of. Optional, may be C{None}.
@type invalidate_handle: str
@see: U{OpenID Specs, Mode: check_authentication
<http://openid.net/specs.bml#mode-check_authentication>}
"""
mode = "check_authentication"
required_fields = ["identity", "return_to", "response_nonce"]
def __init__(self, assoc_handle, signed, invalidate_handle=None):
"""Construct me.
These parameters are assigned directly as class attributes, see
my L{class documentation<CheckAuthRequest>} for their descriptions.
@type assoc_handle: str
@type signed: L{Message}
@type invalidate_handle: str
"""
self.assoc_handle = assoc_handle
self.signed = signed
self.invalidate_handle = invalidate_handle
self.namespace = OPENID2_NS
def fromMessage(klass, message, op_endpoint=UNUSED):
"""Construct me from an OpenID Message.
@param message: An OpenID check_authentication Message
@type message: L{openid.message.Message}
@returntype: L{CheckAuthRequest}
"""
self = klass.__new__(klass)
self.message = message
self.namespace = message.getOpenIDNamespace()
self.assoc_handle = message.getArg(OPENID_NS, 'assoc_handle')
self.sig = message.getArg(OPENID_NS, 'sig')
if (self.assoc_handle is None or
self.sig is None):
fmt = "%s request missing required parameter from message %s"
raise ProtocolError(
message, text=fmt % (self.mode, message))
self.invalidate_handle = message.getArg(OPENID_NS, 'invalidate_handle')
self.signed = message.copy()
# openid.mode is currently check_authentication because
# that's the mode of this request. But the signature
# was made on something with a different openid.mode.
# http://article.gmane.org/gmane.comp.web.openid.general/537
if self.signed.hasKey(OPENID_NS, "mode"):
self.signed.setArg(OPENID_NS, "mode", "id_res")
return self
fromMessage = classmethod(fromMessage)
def answer(self, signatory):
"""Respond to this request.
Given a L{Signatory}, I can check the validity of the signature and
the X{C{invalidate_handle}}.
@param signatory: The L{Signatory} to use to check the signature.
@type signatory: L{Signatory}
@returns: A response with an X{C{is_valid}} (and, if
appropriate X{C{invalidate_handle}}) field.
@returntype: L{OpenIDResponse}
"""
is_valid = signatory.verify(self.assoc_handle, self.signed)
        # Now invalidate that assoc_handle so that this checkAuth message cannot
# be replayed.
signatory.invalidate(self.assoc_handle, dumb=True)
response = OpenIDResponse(self)
valid_str = (is_valid and "true") or "false"
response.fields.setArg(OPENID_NS, 'is_valid', valid_str)
if self.invalidate_handle:
assoc = signatory.getAssociation(self.invalidate_handle, dumb=False)
if not assoc:
response.fields.setArg(
OPENID_NS, 'invalidate_handle', self.invalidate_handle)
return response
def __str__(self):
if self.invalidate_handle:
ih = " invalidate? %r" % (self.invalidate_handle,)
else:
ih = ""
s = "<%s handle: %r sig: %r: signed: %r%s>" % (
self.__class__.__name__, self.assoc_handle,
self.sig, self.signed, ih)
return s
class PlainTextServerSession(object):
"""An object that knows how to handle association requests with no
session type.
@cvar session_type: The session_type for this association
session. There is no type defined for plain-text in the OpenID
specification, so we use 'no-encryption'.
@type session_type: str
@see: U{OpenID Specs, Mode: associate
<http://openid.net/specs.bml#mode-associate>}
@see: AssociateRequest
"""
session_type = 'no-encryption'
allowed_assoc_types = ['HMAC-SHA1', 'HMAC-SHA256']
def fromMessage(cls, unused_request):
return cls()
fromMessage = classmethod(fromMessage)
def answer(self, secret):
return {'mac_key': oidutil.toBase64(secret)}
class DiffieHellmanSHA1ServerSession(object):
"""An object that knows how to handle association requests with the
Diffie-Hellman session type.
@cvar session_type: The session_type for this association
session.
@type session_type: str
@ivar dh: The Diffie-Hellman algorithm values for this request
@type dh: DiffieHellman
@ivar consumer_pubkey: The public key sent by the consumer in the
associate request
@type consumer_pubkey: long
@see: U{OpenID Specs, Mode: associate
<http://openid.net/specs.bml#mode-associate>}
@see: AssociateRequest
"""
session_type = 'DH-SHA1'
hash_func = staticmethod(cryptutil.sha1)
allowed_assoc_types = ['HMAC-SHA1']
def __init__(self, dh, consumer_pubkey):
self.dh = dh
self.consumer_pubkey = consumer_pubkey
def fromMessage(cls, message):
"""
@param message: The associate request message
@type message: openid.message.Message
@returntype: L{DiffieHellmanSHA1ServerSession}
@raises ProtocolError: When parameters required to establish the
session are missing.
"""
dh_modulus = message.getArg(OPENID_NS, 'dh_modulus')
dh_gen = message.getArg(OPENID_NS, 'dh_gen')
if (dh_modulus is None and dh_gen is not None or
dh_gen is None and dh_modulus is not None):
if dh_modulus is None:
missing = 'modulus'
else:
missing = 'generator'
raise ProtocolError(message,
'If non-default modulus or generator is '
'supplied, both must be supplied. Missing %s'
% (missing,))
if dh_modulus or dh_gen:
dh_modulus = cryptutil.base64ToLong(dh_modulus)
dh_gen = cryptutil.base64ToLong(dh_gen)
dh = DiffieHellman(dh_modulus, dh_gen)
else:
dh = DiffieHellman.fromDefaults()
consumer_pubkey = message.getArg(OPENID_NS, 'dh_consumer_public')
if consumer_pubkey is None:
raise ProtocolError(message, "Public key for DH-SHA1 session "
"not found in message %s" % (message,))
consumer_pubkey = cryptutil.base64ToLong(consumer_pubkey)
return cls(dh, consumer_pubkey)
fromMessage = classmethod(fromMessage)
def answer(self, secret):
mac_key = self.dh.xorSecret(self.consumer_pubkey,
secret,
self.hash_func)
return {
'dh_server_public': cryptutil.longToBase64(self.dh.public),
'enc_mac_key': oidutil.toBase64(mac_key),
}
class DiffieHellmanSHA256ServerSession(DiffieHellmanSHA1ServerSession):
session_type = 'DH-SHA256'
hash_func = staticmethod(cryptutil.sha256)
allowed_assoc_types = ['HMAC-SHA256']
class AssociateRequest(OpenIDRequest):
"""A request to establish an X{association}.
@cvar mode: "X{C{check_authentication}}"
@type mode: str
@ivar assoc_type: The type of association. The protocol currently only
defines one value for this, "X{C{HMAC-SHA1}}".
@type assoc_type: str
@ivar session: An object that knows how to handle association
requests of a certain type.
@see: U{OpenID Specs, Mode: associate
<http://openid.net/specs.bml#mode-associate>}
"""
mode = "associate"
session_classes = {
'no-encryption': PlainTextServerSession,
'DH-SHA1': DiffieHellmanSHA1ServerSession,
'DH-SHA256': DiffieHellmanSHA256ServerSession,
}
def __init__(self, session, assoc_type):
"""Construct me.
The session is assigned directly as a class attribute. See my
L{class documentation<AssociateRequest>} for its description.
"""
super(AssociateRequest, self).__init__()
self.session = session
self.assoc_type = assoc_type
self.namespace = OPENID2_NS
def fromMessage(klass, message, op_endpoint=UNUSED):
"""Construct me from an OpenID Message.
@param message: The OpenID associate request
@type message: openid.message.Message
@returntype: L{AssociateRequest}
"""
if message.isOpenID1():
session_type = message.getArg(OPENID_NS, 'session_type')
if session_type == 'no-encryption':
oidutil.log('Received OpenID 1 request with a no-encryption '
                            'association session type. Continuing anyway.')
elif not session_type:
session_type = 'no-encryption'
else:
session_type = message.getArg(OPENID2_NS, 'session_type')
if session_type is None:
raise ProtocolError(message,
text="session_type missing from request")
try:
session_class = klass.session_classes[session_type]
except KeyError:
raise ProtocolError(message,
"Unknown session type %r" % (session_type,))
try:
session = session_class.fromMessage(message)
except ValueError, why:
raise ProtocolError(message, 'Error parsing %s session: %s' %
(session_class.session_type, why[0]))
assoc_type = message.getArg(OPENID_NS, 'assoc_type', 'HMAC-SHA1')
if assoc_type not in session.allowed_assoc_types:
fmt = 'Session type %s does not support association type %s'
raise ProtocolError(message, fmt % (session_type, assoc_type))
self = klass(session, assoc_type)
self.message = message
self.namespace = message.getOpenIDNamespace()
return self
fromMessage = classmethod(fromMessage)
def answer(self, assoc):
"""Respond to this request with an X{association}.
@param assoc: The association to send back.
@type assoc: L{openid.association.Association}
@returns: A response with the association information, encrypted
to the consumer's X{public key} if appropriate.
@returntype: L{OpenIDResponse}
"""
response = OpenIDResponse(self)
response.fields.updateArgs(OPENID_NS, {
'expires_in': '%d' % (assoc.getExpiresIn(),),
'assoc_type': self.assoc_type,
'assoc_handle': assoc.handle,
})
response.fields.updateArgs(OPENID_NS,
self.session.answer(assoc.secret))
if not (self.session.session_type == 'no-encryption' and
self.message.isOpenID1()):
# The session type "no-encryption" did not have a name
# in OpenID v1, it was just omitted.
response.fields.setArg(
OPENID_NS, 'session_type', self.session.session_type)
return response
def answerUnsupported(self, message, preferred_association_type=None,
preferred_session_type=None):
"""Respond to this request indicating that the association
type or association session type is not supported."""
if self.message.isOpenID1():
raise ProtocolError(self.message)
response = OpenIDResponse(self)
response.fields.setArg(OPENID_NS, 'error_code', 'unsupported-type')
response.fields.setArg(OPENID_NS, 'error', message)
if preferred_association_type:
response.fields.setArg(
OPENID_NS, 'assoc_type', preferred_association_type)
if preferred_session_type:
response.fields.setArg(
OPENID_NS, 'session_type', preferred_session_type)
return response
class CheckIDRequest(OpenIDRequest):
"""A request to confirm the identity of a user.
This class handles requests for openid modes X{C{checkid_immediate}}
and X{C{checkid_setup}}.
@cvar mode: "X{C{checkid_immediate}}" or "X{C{checkid_setup}}"
@type mode: str
@ivar immediate: Is this an immediate-mode request?
@type immediate: bool
@ivar identity: The OP-local identifier being checked.
@type identity: str
@ivar claimed_id: The claimed identifier. Not present in OpenID 1.x
messages.
@type claimed_id: str
@ivar trust_root: "Are you Frank?" asks the checkid request. "Who wants
to know?" C{trust_root}, that's who. This URL identifies the party
making the request, and the user will use that to make her decision
about what answer she trusts them to have. Referred to as "realm" in
OpenID 2.0.
@type trust_root: str
@ivar return_to: The URL to send the user agent back to to reply to this
request.
@type return_to: str
@ivar assoc_handle: Provided in smart mode requests, a handle for a
previously established association. C{None} for dumb mode requests.
@type assoc_handle: str
"""
def __init__(self, identity, return_to, trust_root=None, immediate=False,
assoc_handle=None, op_endpoint=None, claimed_id=None):
"""Construct me.
These parameters are assigned directly as class attributes, see
my L{class documentation<CheckIDRequest>} for their descriptions.
@raises MalformedReturnURL: When the C{return_to} URL is not a URL.
"""
self.assoc_handle = assoc_handle
self.identity = identity
self.claimed_id = claimed_id or identity
self.return_to = return_to
self.trust_root = trust_root or return_to
self.op_endpoint = op_endpoint
assert self.op_endpoint is not None
if immediate:
self.immediate = True
self.mode = "checkid_immediate"
else:
self.immediate = False
self.mode = "checkid_setup"
if self.return_to is not None and \
not TrustRoot.parse(self.return_to):
raise MalformedReturnURL(None, self.return_to)
if not self.trustRootValid():
raise UntrustedReturnURL(None, self.return_to, self.trust_root)
self.message = None
def _getNamespace(self):
warnings.warn('The "namespace" attribute of CheckIDRequest objects '
'is deprecated. Use "message.getOpenIDNamespace()" '
'instead', DeprecationWarning, stacklevel=2)
return self.message.getOpenIDNamespace()
namespace = property(_getNamespace)
def fromMessage(klass, message, op_endpoint):
"""Construct me from an OpenID message.
@raises ProtocolError: When not all required parameters are present
in the message.
@raises MalformedReturnURL: When the C{return_to} URL is not a URL.
@raises UntrustedReturnURL: When the C{return_to} URL is outside
the C{trust_root}.
@param message: An OpenID checkid_* request Message
@type message: openid.message.Message
@param op_endpoint: The endpoint URL of the server that this
message was sent to.
@type op_endpoint: str
@returntype: L{CheckIDRequest}
"""
self = klass.__new__(klass)
self.message = message
self.op_endpoint = op_endpoint
mode = message.getArg(OPENID_NS, 'mode')
if mode == "checkid_immediate":
self.immediate = True
self.mode = "checkid_immediate"
else:
self.immediate = False
self.mode = "checkid_setup"
self.return_to = message.getArg(OPENID_NS, 'return_to')
if message.isOpenID1() and not self.return_to:
fmt = "Missing required field 'return_to' from %r"
raise ProtocolError(message, text=fmt % (message,))
self.identity = message.getArg(OPENID_NS, 'identity')
self.claimed_id = message.getArg(OPENID_NS, 'claimed_id')
if message.isOpenID1():
if self.identity is None:
s = "OpenID 1 message did not contain openid.identity"
raise ProtocolError(message, text=s)
else:
if self.identity and not self.claimed_id:
s = ("OpenID 2.0 message contained openid.identity but not "
"claimed_id")
raise ProtocolError(message, text=s)
elif self.claimed_id and not self.identity:
s = ("OpenID 2.0 message contained openid.claimed_id but not "
"identity")
raise ProtocolError(message, text=s)
# There's a case for making self.trust_root be a TrustRoot
# here. But if TrustRoot isn't currently part of the "public" API,
# I'm not sure it's worth doing.
if message.isOpenID1():
trust_root_param = 'trust_root'
else:
trust_root_param = 'realm'
# Using 'or' here is slightly different than sending a default
# argument to getArg, as it will treat no value and an empty
# string as equivalent.
self.trust_root = (message.getArg(OPENID_NS, trust_root_param)
or self.return_to)
if not message.isOpenID1():
if self.return_to is self.trust_root is None:
raise ProtocolError(message, "openid.realm required when " +
"openid.return_to absent")
self.assoc_handle = message.getArg(OPENID_NS, 'assoc_handle')
# Using TrustRoot.parse here is a bit misleading, as we're not
# parsing return_to as a trust root at all. However, valid URLs
# are valid trust roots, so we can use this to get an idea if it
# is a valid URL. Not all trust roots are valid return_to URLs,
# however (particularly ones with wildcards), so this is still a
# little sketchy.
if self.return_to is not None and \
not TrustRoot.parse(self.return_to):
raise MalformedReturnURL(message, self.return_to)
# I first thought that checking to see if the return_to is within
# the trust_root is premature here, a logic-not-decoding thing. But
# it was argued that this is really part of data validation. A
# request with an invalid trust_root/return_to is broken regardless of
# application, right?
if not self.trustRootValid():
raise UntrustedReturnURL(message, self.return_to, self.trust_root)
return self
fromMessage = classmethod(fromMessage)
def idSelect(self):
"""Is the identifier to be selected by the IDP?
@returntype: bool
"""
# So IDPs don't have to import the constant
return self.identity == IDENTIFIER_SELECT
def trustRootValid(self):
"""Is my return_to under my trust_root?
@returntype: bool
"""
if not self.trust_root:
return True
tr = TrustRoot.parse(self.trust_root)
if tr is None:
raise MalformedTrustRoot(self.message, self.trust_root)
if self.return_to is not None:
return tr.validateURL(self.return_to)
else:
return True
def returnToVerified(self):
"""Does the relying party publish the return_to URL for this
response under the realm? It is up to the provider to set a
policy for what kinds of realms should be allowed. This
return_to URL verification reduces vulnerability to data-theft
attacks based on open proxies, cross-site-scripting, or open
redirectors.
This check should only be performed after making sure that the
return_to URL matches the realm.
@see: L{trustRootValid}
@raises openid.yadis.discover.DiscoveryFailure: if the realm
URL does not support Yadis discovery (and so does not
support the verification process).
@raises openid.fetchers.HTTPFetchingError: if the realm URL
is not reachable. When this is the case, the RP may be hosted
on the user's intranet.
@returntype: bool
@returns: True if the realm publishes a document with the
return_to URL listed
@since: 2.1.0
"""
return verifyReturnTo(self.trust_root, self.return_to)
def answer(self, allow, server_url=None, identity=None, claimed_id=None):
"""Respond to this request.
@param allow: Allow this user to claim this identity, and allow the
consumer to have this information?
@type allow: bool
@param server_url: DEPRECATED. Passing C{op_endpoint} to the
L{Server} constructor makes this optional.
When an OpenID 1.x immediate mode request does not succeed,
it gets back a URL where the request may be carried out
in a not-so-immediate fashion. Pass my URL in here (the
fully qualified address of this server's endpoint, i.e.
C{http://example.com/server}), and I will use it as a base for the
URL for a new request.
Optional for requests where C{CheckIDRequest.immediate} is C{False}
or C{allow} is C{True}.
@type server_url: str
@param identity: The OP-local identifier to answer with. Only for use
when the relying party requested identifier selection.
@type identity: str or None
@param claimed_id: The claimed identifier to answer with, for use
with identifier selection in the case where the claimed identifier
and the OP-local identifier differ, i.e. when the claimed_id uses
delegation.
If C{identity} is provided but this is not, C{claimed_id} will
default to the value of C{identity}. When answering requests
that did not ask for identifier selection, the response
C{claimed_id} will default to that of the request.
This parameter is new in OpenID 2.0.
@type claimed_id: str or None
@returntype: L{OpenIDResponse}
@change: Version 2.0 deprecates C{server_url} and adds C{claimed_id}.
@raises NoReturnError: when I do not have a return_to.
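        For example, answering an identifier-select request (the URLs below
        are illustrative)::
            response = request.answer(
                True,
                identity='http://example.com/users/frank',
                claimed_id='http://frank.example.com/')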
"""
assert self.message is not None
if not self.return_to:
raise NoReturnToError
if not server_url:
if not self.message.isOpenID1() and not self.op_endpoint:
# In other words, that warning I raised in Server.__init__?
# You should pay attention to it now.
raise RuntimeError("%s should be constructed with op_endpoint "
"to respond to OpenID 2.0 messages." %
(self,))
server_url = self.op_endpoint
if allow:
mode = 'id_res'
elif self.message.isOpenID1():
if self.immediate:
mode = 'id_res'
else:
mode = 'cancel'
else:
if self.immediate:
mode = 'setup_needed'
else:
mode = 'cancel'
response = OpenIDResponse(self)
if claimed_id and self.message.isOpenID1():
namespace = self.message.getOpenIDNamespace()
raise VersionError("claimed_id is new in OpenID 2.0 and not "
"available for %s" % (namespace,))
if allow:
if self.identity == IDENTIFIER_SELECT:
if not identity:
raise ValueError(
"This request uses IdP-driven identifier selection."
"You must supply an identifier in the response.")
response_identity = identity
response_claimed_id = claimed_id or identity
elif self.identity:
if identity and (self.identity != identity):
normalized_request_identity = urinorm(self.identity)
normalized_answer_identity = urinorm(identity)
if (normalized_request_identity !=
normalized_answer_identity):
raise ValueError(
"Request was for identity %r, cannot reply "
"with identity %r" % (self.identity, identity))
# The "identity" value in the response shall always be
# the same as that in the request, otherwise the RP is
# likely to not validate the response.
response_identity = self.identity
response_claimed_id = self.claimed_id
else:
if identity:
raise ValueError(
"This request specified no identity and you "
"supplied %r" % (identity,))
response_identity = None
if self.message.isOpenID1() and response_identity is None:
raise ValueError(
"Request was an OpenID 1 request, so response must "
"include an identifier."
)
response.fields.updateArgs(OPENID_NS, {
'mode': mode,
'return_to': self.return_to,
'response_nonce': mkNonce(),
})
if server_url:
response.fields.setArg(OPENID_NS, 'op_endpoint', server_url)
if response_identity is not None:
response.fields.setArg(
OPENID_NS, 'identity', response_identity)
if self.message.isOpenID2():
response.fields.setArg(
OPENID_NS, 'claimed_id', response_claimed_id)
else:
response.fields.setArg(OPENID_NS, 'mode', mode)
if self.immediate:
if self.message.isOpenID1() and not server_url:
raise ValueError("setup_url is required for allow=False "
"in OpenID 1.x immediate mode.")
# Make a new request just like me, but with immediate=False.
setup_request = self.__class__(
self.identity, self.return_to, self.trust_root,
immediate=False, assoc_handle=self.assoc_handle,
op_endpoint=self.op_endpoint, claimed_id=self.claimed_id)
# XXX: This API is weird.
setup_request.message = self.message
setup_url = setup_request.encodeToURL(server_url)
response.fields.setArg(OPENID_NS, 'user_setup_url', setup_url)
return response
def encodeToURL(self, server_url):
"""Encode this request as a URL to GET.
@param server_url: The URL of the OpenID server to make this request of.
@type server_url: str
@returntype: str
@raises NoReturnError: when I do not have a return_to.
"""
if not self.return_to:
raise NoReturnToError
# Imported from the alternate reality where these classes are used
# in both the client and server code, so Requests are Encodable too.
# That's right, code imported from alternate realities all for the
# love of you, id_res/user_setup_url.
q = {'mode': self.mode,
'identity': self.identity,
'claimed_id': self.claimed_id,
'return_to': self.return_to}
if self.trust_root:
if self.message.isOpenID1():
q['trust_root'] = self.trust_root
else:
q['realm'] = self.trust_root
if self.assoc_handle:
q['assoc_handle'] = self.assoc_handle
response = Message(self.message.getOpenIDNamespace())
response.updateArgs(OPENID_NS, q)
return response.toURL(server_url)
def getCancelURL(self):
"""Get the URL to cancel this request.
Useful for creating a "Cancel" button on a web form so that operation
can be carried out directly without another trip through the server.
(Except you probably want to make another trip through the server so
that it knows that the user did make a decision. Or you could simulate
this method by doing C{.answer(False).encodeToURL()})
@returntype: str
@returns: The return_to URL with openid.mode = cancel.
@raises NoReturnError: when I do not have a return_to.
"""
if not self.return_to:
raise NoReturnToError
if self.immediate:
raise ValueError("Cancel is not an appropriate response to "
"immediate mode requests.")
response = Message(self.message.getOpenIDNamespace())
response.setArg(OPENID_NS, 'mode', 'cancel')
return response.toURL(self.return_to)
def __repr__(self):
return '<%s id:%r im:%s tr:%r ah:%r>' % (self.__class__.__name__,
self.identity,
self.immediate,
self.trust_root,
self.assoc_handle)
class OpenIDResponse(object):
"""I am a response to an OpenID request.
@ivar request: The request I respond to.
@type request: L{OpenIDRequest}
@ivar fields: My parameters as a dictionary with each key mapping to
one value. Keys are parameter names with no leading "C{openid.}".
e.g. "C{identity}" and "C{mac_key}", never "C{openid.identity}".
@type fields: L{openid.message.Message}
@ivar signed: The names of the fields which should be signed.
@type signed: list of str
"""
# Implementer's note: In a more symmetric client/server
# implementation, there would be more types of OpenIDResponse
# object and they would have validated attributes according to the
# type of response. But as it is, Response objects in a server are
# basically write-only, their only job is to go out over the wire,
# so this is just a loose wrapper around OpenIDResponse.fields.
def __init__(self, request):
"""Make a response to an L{OpenIDRequest}.
@type request: L{OpenIDRequest}
"""
self.request = request
self.fields = Message(request.namespace)
def __str__(self):
return "%s for %s: %s" % (
self.__class__.__name__,
self.request.__class__.__name__,
self.fields)
def toFormMarkup(self, form_tag_attrs=None):
"""Returns the form markup for this response.
@param form_tag_attrs: Dictionary of attributes to be added to
the form tag. 'accept-charset' and 'enctype' have defaults
that can be overridden. If a value is supplied for
'action' or 'method', it will be replaced.
@returntype: str
@since: 2.1.0
"""
return self.fields.toFormMarkup(self.request.return_to,
form_tag_attrs=form_tag_attrs)
def toHTML(self, form_tag_attrs=None):
"""Returns an HTML document that auto-submits the form markup
for this response.
@returntype: str
@see: toFormMarkup
@since: 2.1.?
"""
return oidutil.autoSubmitHTML(self.toFormMarkup(form_tag_attrs))
def renderAsForm(self):
"""Returns True if this response's encoding is
ENCODE_HTML_FORM. Convenience method for server authors.
@returntype: bool
@since: 2.1.0
"""
return self.whichEncoding() == ENCODE_HTML_FORM
def needsSigning(self):
"""Does this response require signing?
@returntype: bool
"""
return self.fields.getArg(OPENID_NS, 'mode') == 'id_res'
# implements IEncodable
def whichEncoding(self):
"""How should I be encoded?
@returns: one of ENCODE_URL, ENCODE_HTML_FORM, or ENCODE_KVFORM.
@change: 2.1.0 added the ENCODE_HTML_FORM response.
"""
if self.request.mode in BROWSER_REQUEST_MODES:
if self.fields.getOpenIDNamespace() == OPENID2_NS and \
len(self.encodeToURL()) > OPENID1_URL_LIMIT:
return ENCODE_HTML_FORM
else:
return ENCODE_URL
else:
return ENCODE_KVFORM
def encodeToURL(self):
"""Encode a response as a URL for the user agent to GET.
You will generally use this URL with a HTTP redirect.
@returns: A URL to direct the user agent back to.
@returntype: str
"""
return self.fields.toURL(self.request.return_to)
def addExtension(self, extension_response):
"""
Add an extension response to this response message.
@param extension_response: An object that implements the
extension interface for adding arguments to an OpenID
message.
@type extension_response: L{openid.extension}
@returntype: None
"""
extension_response.toMessage(self.fields)
def encodeToKVForm(self):
"""Encode a response in key-value colon/newline format.
This is a machine-readable format used to respond to messages which
came directly from the consumer and not through the user agent.
@see: OpenID Specs,
U{Key-Value Colon/Newline format<http://openid.net/specs.bml#keyvalue>}
@returntype: str
"""
return self.fields.toKVForm()
class WebResponse(object):
"""I am a response to an OpenID request in terms a web server understands.
I generally come from an L{Encoder}, either directly or from
L{Server.encodeResponse}.
@ivar code: The HTTP code of this response.
@type code: int
@ivar headers: Headers to include in this response.
@type headers: dict
@ivar body: The body of this response.
@type body: str
"""
def __init__(self, code=HTTP_OK, headers=None, body=""):
"""Construct me.
These parameters are assigned directly as class attributes, see
my L{class documentation<WebResponse>} for their descriptions.
"""
self.code = code
if headers is not None:
self.headers = headers
else:
self.headers = {}
self.body = body
class Signatory(object):
"""I sign things.
I also check signatures.
All my state is encapsulated in an
L{OpenIDStore<openid.store.interface.OpenIDStore>}, which means
I'm not generally pickleable but I am easy to reconstruct.
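    A rough usage sketch (the handle and message below are placeholders)::
        signatory = Signatory(store)
        assoc = signatory.createAssociation(dumb=False)
        # later, when checking a dumb-mode signature:
        is_valid = signatory.verify(assoc_handle, signed_message)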
@cvar SECRET_LIFETIME: The number of seconds a secret remains valid.
@type SECRET_LIFETIME: int
"""
SECRET_LIFETIME = 14 * 24 * 60 * 60 # 14 days, in seconds
# keys have a bogus server URL in them because the filestore
# really does expect that key to be a URL. This seems a little
# silly for the server store, since I expect there to be only one
# server URL.
_normal_key = 'http://localhost/|normal'
_dumb_key = 'http://localhost/|dumb'
def __init__(self, store):
"""Create a new Signatory.
@param store: The back-end where my associations are stored.
@type store: L{openid.store.interface.OpenIDStore}
"""
assert store is not None
self.store = store
def verify(self, assoc_handle, message):
"""Verify that the signature for some data is valid.
@param assoc_handle: The handle of the association used to sign the
data.
@type assoc_handle: str
@param message: The signed message to verify
@type message: openid.message.Message
@returns: C{True} if the signature is valid, C{False} if not.
@returntype: bool
"""
assoc = self.getAssociation(assoc_handle, dumb=True)
if not assoc:
oidutil.log("failed to get assoc with handle %r to verify "
"message %r"
% (assoc_handle, message))
return False
try:
valid = assoc.checkMessageSignature(message)
except ValueError, ex:
oidutil.log("Error in verifying %s with %s: %s" % (message,
assoc,
ex))
return False
return valid
def sign(self, response):
"""Sign a response.
I take a L{OpenIDResponse}, create a signature for everything
in its L{signed<OpenIDResponse.signed>} list, and return a new
copy of the response object with that signature included.
@param response: A response to sign.
@type response: L{OpenIDResponse}
@returns: A signed copy of the response.
@returntype: L{OpenIDResponse}
"""
signed_response = deepcopy(response)
assoc_handle = response.request.assoc_handle
if assoc_handle:
# normal mode
# disabling expiration check because even if the association
# is expired, we still need to know some properties of the
# association so that we may preserve those properties when
# creating the fallback association.
assoc = self.getAssociation(assoc_handle, dumb=False,
checkExpiration=False)
if not assoc or assoc.expiresIn <= 0:
# fall back to dumb mode
signed_response.fields.setArg(
OPENID_NS, 'invalidate_handle', assoc_handle)
assoc_type = assoc and assoc.assoc_type or 'HMAC-SHA1'
if assoc and assoc.expiresIn <= 0:
# now do the clean-up that the disabled checkExpiration
# code didn't get to do.
self.invalidate(assoc_handle, dumb=False)
assoc = self.createAssociation(dumb=True, assoc_type=assoc_type)
else:
# dumb mode.
assoc = self.createAssociation(dumb=True)
try:
signed_response.fields = assoc.signMessage(signed_response.fields)
except kvform.KVFormError, err:
raise EncodingError(response, explanation=str(err))
return signed_response
def createAssociation(self, dumb=True, assoc_type='HMAC-SHA1'):
"""Make a new association.
@param dumb: Is this association for a dumb-mode transaction?
@type dumb: bool
@param assoc_type: The type of association to create. Currently
there is only one type defined, C{HMAC-SHA1}.
@type assoc_type: str
@returns: the new association.
@returntype: L{openid.association.Association}
"""
secret = cryptutil.getBytes(getSecretSize(assoc_type))
uniq = oidutil.toBase64(cryptutil.getBytes(4))
handle = '{%s}{%x}{%s}' % (assoc_type, int(time.time()), uniq)
assoc = Association.fromExpiresIn(
self.SECRET_LIFETIME, handle, secret, assoc_type)
if dumb:
key = self._dumb_key
else:
key = self._normal_key
self.store.storeAssociation(key, assoc)
return assoc
def getAssociation(self, assoc_handle, dumb, checkExpiration=True):
"""Get the association with the specified handle.
@type assoc_handle: str
@param dumb: Is this association used with dumb mode?
@type dumb: bool
@returns: the association, or None if no valid association with that
handle was found.
@returntype: L{openid.association.Association}
"""
# Hmm. We've created an interface that deals almost entirely with
# assoc_handles. The only place outside the Signatory that uses this
# (and thus the only place that ever sees Association objects) is
# when creating a response to an association request, as it must have
# the association's secret.
if assoc_handle is None:
raise ValueError("assoc_handle must not be None")
if dumb:
key = self._dumb_key
else:
key = self._normal_key
assoc = self.store.getAssociation(key, assoc_handle)
if assoc is not None and assoc.expiresIn <= 0:
oidutil.log("requested %sdumb key %r is expired (by %s seconds)" %
((not dumb) and 'not-' or '',
assoc_handle, assoc.expiresIn))
if checkExpiration:
self.store.removeAssociation(key, assoc_handle)
assoc = None
return assoc
def invalidate(self, assoc_handle, dumb):
"""Invalidates the association with the given handle.
@type assoc_handle: str
@param dumb: Is this association used with dumb mode?
@type dumb: bool
"""
if dumb:
key = self._dumb_key
else:
key = self._normal_key
self.store.removeAssociation(key, assoc_handle)
class Encoder(object):
"""I encode responses in to L{WebResponses<WebResponse>}.
If you don't like L{WebResponses<WebResponse>}, you can do
your own handling of L{OpenIDResponses<OpenIDResponse>} with
L{OpenIDResponse.whichEncoding}, L{OpenIDResponse.encodeToURL}, and
L{OpenIDResponse.encodeToKVForm}.
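    For example (C{redirect}, C{render} and C{send_kvform} stand in for
    whatever your web framework provides)::
        encoding = response.whichEncoding()
        if encoding == ENCODE_URL:
            redirect(response.encodeToURL())
        elif encoding == ENCODE_HTML_FORM:
            render(response.toHTML())
        else:
            send_kvform(response.encodeToKVForm())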
"""
responseFactory = WebResponse
def encode(self, response):
"""Encode a response to a L{WebResponse}.
@raises EncodingError: When I can't figure out how to encode this
message.
"""
encode_as = response.whichEncoding()
if encode_as == ENCODE_KVFORM:
wr = self.responseFactory(body=response.encodeToKVForm())
if isinstance(response, Exception):
wr.code = HTTP_ERROR
elif encode_as == ENCODE_URL:
location = response.encodeToURL()
wr = self.responseFactory(code=HTTP_REDIRECT,
headers={'location': location})
elif encode_as == ENCODE_HTML_FORM:
wr = self.responseFactory(code=HTTP_OK,
body=response.toFormMarkup())
else:
# Can't encode this to a protocol message. You should probably
# render it to HTML and show it to the user.
raise EncodingError(response)
return wr
class SigningEncoder(Encoder):
"""I encode responses in to L{WebResponses<WebResponse>}, signing them when required.
"""
def __init__(self, signatory):
"""Create a L{SigningEncoder}.
@param signatory: The L{Signatory} I will make signatures with.
@type signatory: L{Signatory}
"""
self.signatory = signatory
def encode(self, response):
"""Encode a response to a L{WebResponse}, signing it first if appropriate.
@raises EncodingError: When I can't figure out how to encode this
message.
@raises AlreadySigned: When this response is already signed.
@returntype: L{WebResponse}
"""
# the isinstance is a bit of a kludge... it means there isn't really
# an adapter to make the interfaces quite match.
if (not isinstance(response, Exception)) and response.needsSigning():
if not self.signatory:
raise ValueError(
"Must have a store to sign this request: %s" %
(response,), response)
if response.fields.hasKey(OPENID_NS, 'sig'):
raise AlreadySigned(response)
response = self.signatory.sign(response)
return super(SigningEncoder, self).encode(response)
class Decoder(object):
"""I decode an incoming web request in to a L{OpenIDRequest}.
"""
_handlers = {
'checkid_setup': CheckIDRequest.fromMessage,
'checkid_immediate': CheckIDRequest.fromMessage,
'check_authentication': CheckAuthRequest.fromMessage,
'associate': AssociateRequest.fromMessage,
}
def __init__(self, server):
"""Construct a Decoder.
@param server: The server which I am decoding requests for.
(Necessary because some replies reference their server.)
@type server: L{Server}
"""
self.server = server
def decode(self, query):
"""I transform query parameters into an L{OpenIDRequest}.
If the query does not seem to be an OpenID request at all, I return
C{None}.
@param query: The query parameters as a dictionary with each
key mapping to one value.
@type query: dict
@raises ProtocolError: When the query does not seem to be a valid
OpenID request.
@returntype: L{OpenIDRequest}
"""
if not query:
return None
try:
message = Message.fromPostArgs(query)
except InvalidOpenIDNamespace, err:
# It's useful to have a Message attached to a ProtocolError, so we
# override the bad ns value to build a Message out of it. Kinda
# kludgy, since it's made of lies, but the parts that aren't lies
# are more useful than a 'None'.
query = query.copy()
query['openid.ns'] = OPENID2_NS
message = Message.fromPostArgs(query)
raise ProtocolError(message, str(err))
mode = message.getArg(OPENID_NS, 'mode')
if not mode:
fmt = "No mode value in message %s"
raise ProtocolError(message, text=fmt % (message,))
handler = self._handlers.get(mode, self.defaultDecoder)
return handler(message, self.server.op_endpoint)
def defaultDecoder(self, message, server):
"""Called to decode queries when no handler for that mode is found.
@raises ProtocolError: This implementation always raises
L{ProtocolError}.
"""
mode = message.getArg(OPENID_NS, 'mode')
fmt = "Unrecognized OpenID mode %r"
raise ProtocolError(message, text=fmt % (mode,))
class Server(object):
"""I handle requests for an OpenID server.
Some types of requests (those which are not C{checkid} requests) may be
handed to my L{handleRequest} method, and I will take care of it and
return a response.
For your convenience, I also provide an interface to L{Decoder.decode}
and L{SigningEncoder.encode} through my methods L{decodeRequest} and
L{encodeResponse}.
All my state is encapsulated in an
L{OpenIDStore<openid.store.interface.OpenIDStore>}, which means
I'm not generally pickleable but I am easy to reconstruct.
Example::
oserver = Server(FileOpenIDStore(data_path), "http://example.com/op")
request = oserver.decodeRequest(query)
if request.mode in ['checkid_immediate', 'checkid_setup']:
if self.isAuthorized(request.identity, request.trust_root):
response = request.answer(True)
elif request.immediate:
response = request.answer(False)
else:
self.showDecidePage(request)
return
else:
response = oserver.handleRequest(request)
        webresponse = oserver.encodeResponse(response)
@ivar signatory: I'm using this for associate requests and to sign things.
@type signatory: L{Signatory}
@ivar decoder: I'm using this to decode things.
@type decoder: L{Decoder}
@ivar encoder: I'm using this to encode things.
@type encoder: L{Encoder}
@ivar op_endpoint: My URL.
@type op_endpoint: str
@ivar negotiator: I use this to determine which kinds of
associations I can make and how.
@type negotiator: L{openid.association.SessionNegotiator}
"""
signatoryClass = Signatory
encoderClass = SigningEncoder
decoderClass = Decoder
def __init__(self, store, op_endpoint=None):
"""A new L{Server}.
@param store: The back-end where my associations are stored.
@type store: L{openid.store.interface.OpenIDStore}
@param op_endpoint: My URL, the fully qualified address of this
server's endpoint, i.e. C{http://example.com/server}
@type op_endpoint: str
@change: C{op_endpoint} is new in library version 2.0. It
currently defaults to C{None} for compatibility with
earlier versions of the library, but you must provide it
if you want to respond to any version 2 OpenID requests.
"""
self.store = store
self.signatory = self.signatoryClass(self.store)
self.encoder = self.encoderClass(self.signatory)
self.decoder = self.decoderClass(self)
self.negotiator = default_negotiator.copy()
if not op_endpoint:
warnings.warn("%s.%s constructor requires op_endpoint parameter "
"for OpenID 2.0 servers" %
(self.__class__.__module__, self.__class__.__name__),
stacklevel=2)
self.op_endpoint = op_endpoint
def handleRequest(self, request):
"""Handle a request.
Give me a request, I will give you a response. Unless it's a type
of request I cannot handle myself, in which case I will raise
C{NotImplementedError}. In that case, you can handle it yourself,
or add a method to me for handling that request type.
@raises NotImplementedError: When I do not have a handler defined
for that type of request.
@returntype: L{OpenIDResponse}
"""
handler = getattr(self, 'openid_' + request.mode, None)
if handler is not None:
return handler(request)
else:
raise NotImplementedError(
"%s has no handler for a request of mode %r." %
(self, request.mode))
def openid_check_authentication(self, request):
"""Handle and respond to C{check_authentication} requests.
@returntype: L{OpenIDResponse}
"""
return request.answer(self.signatory)
def openid_associate(self, request):
"""Handle and respond to C{associate} requests.
@returntype: L{OpenIDResponse}
"""
# XXX: TESTME
assoc_type = request.assoc_type
session_type = request.session.session_type
if self.negotiator.isAllowed(assoc_type, session_type):
assoc = self.signatory.createAssociation(dumb=False,
assoc_type=assoc_type)
return request.answer(assoc)
else:
message = ('Association type %r is not supported with '
'session type %r' % (assoc_type, session_type))
(preferred_assoc_type, preferred_session_type) = \
self.negotiator.getAllowedType()
return request.answerUnsupported(
message,
preferred_assoc_type,
preferred_session_type)
def decodeRequest(self, query):
"""Transform query parameters into an L{OpenIDRequest}.
If the query does not seem to be an OpenID request at all, I return
C{None}.
@param query: The query parameters as a dictionary with each
key mapping to one value.
@type query: dict
@raises ProtocolError: When the query does not seem to be a valid
OpenID request.
@returntype: L{OpenIDRequest}
@see: L{Decoder.decode}
"""
return self.decoder.decode(query)
def encodeResponse(self, response):
"""Encode a response to a L{WebResponse}, signing it first if appropriate.
@raises EncodingError: When I can't figure out how to encode this
message.
@raises AlreadySigned: When this response is already signed.
@returntype: L{WebResponse}
@see: L{SigningEncoder.encode}
"""
return self.encoder.encode(response)
class ProtocolError(Exception):
"""A message did not conform to the OpenID protocol.
@ivar message: The query that is failing to be a valid OpenID request.
@type message: openid.message.Message
"""
def __init__(self, message, text=None, reference=None, contact=None):
"""When an error occurs.
@param message: The message that is failing to be a valid
OpenID request.
@type message: openid.message.Message
@param text: A message about the encountered error. Set as C{args[0]}.
@type text: str
"""
self.openid_message = message
self.reference = reference
self.contact = contact
assert type(message) not in [str, unicode]
Exception.__init__(self, text)
def getReturnTo(self):
"""Get the return_to argument from the request, if any.
@returntype: str
"""
if self.openid_message is None:
return None
else:
return self.openid_message.getArg(OPENID_NS, 'return_to')
def hasReturnTo(self):
"""Did this request have a return_to parameter?
@returntype: bool
"""
return self.getReturnTo() is not None
def toMessage(self):
"""Generate a Message object for sending to the relying party,
after encoding.
"""
namespace = self.openid_message.getOpenIDNamespace()
reply = Message(namespace)
reply.setArg(OPENID_NS, 'mode', 'error')
reply.setArg(OPENID_NS, 'error', str(self))
if self.contact is not None:
reply.setArg(OPENID_NS, 'contact', str(self.contact))
if self.reference is not None:
reply.setArg(OPENID_NS, 'reference', str(self.reference))
return reply
# implements IEncodable
def encodeToURL(self):
return self.toMessage().toURL(self.getReturnTo())
def encodeToKVForm(self):
return self.toMessage().toKVForm()
def toFormMarkup(self):
"""Encode to HTML form markup for POST.
@since: 2.1.0
"""
return self.toMessage().toFormMarkup(self.getReturnTo())
def toHTML(self):
"""Encode to a full HTML page, wrapping the form markup in a page
that will autosubmit the form.
@since: 2.1.?
"""
return oidutil.autoSubmitHTML(self.toFormMarkup())
def whichEncoding(self):
"""How should I be encoded?
        @returns: one of ENCODE_URL, ENCODE_HTML_FORM, ENCODE_KVFORM, or None.
            If None, I cannot be encoded as a protocol message and should be
            displayed to the user.
"""
if self.hasReturnTo():
if self.openid_message.getOpenIDNamespace() == OPENID2_NS and \
len(self.encodeToURL()) > OPENID1_URL_LIMIT:
return ENCODE_HTML_FORM
else:
return ENCODE_URL
if self.openid_message is None:
return None
mode = self.openid_message.getArg(OPENID_NS, 'mode')
if mode:
if mode not in BROWSER_REQUEST_MODES:
return ENCODE_KVFORM
# According to the OpenID spec as of this writing, we are probably
# supposed to switch on request type here (GET versus POST) to figure
# out if we're supposed to print machine-readable or human-readable
# content at this point. GET/POST seems like a pretty lousy way of
# making the distinction though, as it's just as possible that the
# user agent could have mistakenly been directed to post to the
# server URL.
# Basically, if your request was so broken that you didn't manage to
# include an openid.mode, I'm not going to worry too much about
# returning you something you can't parse.
return None
class VersionError(Exception):
"""Raised when an operation was attempted that is not compatible with
the protocol version being used."""
class NoReturnToError(Exception):
"""Raised when a response to a request cannot be generated because
the request contains no return_to URL.
"""
pass
class EncodingError(Exception):
"""Could not encode this as a protocol message.
You should probably render it and show it to the user.
@ivar response: The response that failed to encode.
@type response: L{OpenIDResponse}
"""
def __init__(self, response, explanation=None):
Exception.__init__(self, response)
self.response = response
self.explanation = explanation
def __str__(self):
if self.explanation:
s = '%s: %s' % (self.__class__.__name__,
self.explanation)
else:
s = '%s for Response %s' % (
self.__class__.__name__, self.response)
return s
class AlreadySigned(EncodingError):
"""This response is already signed."""
class UntrustedReturnURL(ProtocolError):
"""A return_to is outside the trust_root."""
def __init__(self, message, return_to, trust_root):
ProtocolError.__init__(self, message)
self.return_to = return_to
self.trust_root = trust_root
def __str__(self):
return "return_to %r not under trust_root %r" % (self.return_to,
self.trust_root)
class MalformedReturnURL(ProtocolError):
"""The return_to URL doesn't look like a valid URL."""
def __init__(self, openid_message, return_to):
self.return_to = return_to
ProtocolError.__init__(self, openid_message)
class MalformedTrustRoot(ProtocolError):
"""The trust root is not well-formed.
@see: OpenID Specs, U{openid.trust_root<http://openid.net/specs.bml#mode-checkid_immediate>}
"""
pass
#class IEncodable: # Interface
# def encodeToURL(return_to):
# """Encode a response as a URL for redirection.
#
# @returns: A URL to direct the user agent back to.
# @returntype: str
# """
# pass
#
# def encodeToKvform():
# """Encode a response in key-value colon/newline format.
#
# This is a machine-readable format used to respond to messages which
# came directly from the consumer and not through the user agent.
#
# @see: OpenID Specs,
# U{Key-Value Colon/Newline format<http://openid.net/specs.bml#keyvalue>}
#
# @returntype: str
# """
# pass
#
# def whichEncoding():
# """How should I be encoded?
#
# @returns: one of ENCODE_URL, ENCODE_KVFORM, or None. If None,
# I cannot be encoded as a protocol message and should be
# displayed to the user.
# """
# pass
| bsd-3-clause |
anant-dev/django | tests/indexes/models.py | 253 | 1714 | from django.db import connection, models
class CurrentTranslation(models.ForeignObject):
"""
Creates virtual relation to the translation with model cache enabled.
"""
# Avoid validation
requires_unique_target = False
def __init__(self, to, on_delete, from_fields, to_fields, **kwargs):
# Disable reverse relation
kwargs['related_name'] = '+'
# Set unique to enable model cache.
kwargs['unique'] = True
super(CurrentTranslation, self).__init__(to, on_delete, from_fields, to_fields, **kwargs)
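# With the reverse relation disabled and unique=True, Article.translation behaves
# like a cached one-to-one link to the ArticleTranslation row whose `article`
# column matches the article's id (see the field definition further below).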
class ArticleTranslation(models.Model):
article = models.ForeignKey('indexes.Article', models.CASCADE)
language = models.CharField(max_length=10, unique=True)
content = models.TextField()
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
# Add virtual relation to the ArticleTranslation model.
translation = CurrentTranslation(ArticleTranslation, models.CASCADE, ['id'], ['article'])
class Meta:
index_together = [
["headline", "pub_date"],
]
# Model for index_together being used only with single list
class IndexTogetherSingleList(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
index_together = ["headline", "pub_date"]
# Indexing a TextField on Oracle or MySQL results in index creation error.
if connection.vendor == 'postgresql':
class IndexedArticle(models.Model):
headline = models.CharField(max_length=100, db_index=True)
body = models.TextField(db_index=True)
slug = models.CharField(max_length=40, unique=True)
| bsd-3-clause |
pcrews/rannsaka | test_files/2volumes_basic.py | 1 | 3456 | import os
import random
import time
import json
from locust import HttpLocust, TaskSet, task
from lib.baseTaskSet import baseTaskSet
# TODO - make these config-driven
from lib.openstack.keystone import get_auth_token
from lib.openstack.cinder import list_volumes
from lib.openstack.cinder import list_volumes_detail
from lib.openstack.cinder import list_volume_detail
from lib.openstack.cinder import create_volume
from lib.openstack.cinder import delete_volume
from lib.openstack.cinder import cinder_get_volume_id
from lib.openstack.nova import nova_get_image_id
from lib.openstack.nova import list_limits
class UserBehavior(baseTaskSet):
def on_start(self):
super(UserBehavior, self).on_start()
self.volume_id = None
self.volume_count = 0
self.sleep_times=[0,0,1,1,1,1,3,3,3,5,5,5,5,10,10,30,30]
self.auth_token, self.tenant_id, self.service_catalog = get_auth_token(self)
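    # chance() returns True only when 2 is drawn from [1, 1, 1, 1, 2] (~20% of calls);
    # it is used below to randomize which optional volume-create parameters get set.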
def chance(self):
chances = [1,1,1,1,2]
if random.choice(chances)%2==0:
return True
else:
return False
def rand_sleep(self):
time.sleep(random.choice(self.sleep_times))
@task(2)
def update_volume_id(self):
self.volume_id = cinder_get_volume_id(self)
@task(5)
def cinder_create_volume(self):
if not self.volume_id:
volume_id=None
image_id=None
bootable=False
size=1
# volume_id
if self.chance():
volume_id = cinder_get_volume_id(self)
# image_id
if self.chance():
image_id = nova_get_image_id(self)
# bootable
if self.chance():
bootable=True
# metadata
# size
sizes = [1,1,1,3,3,5,5,2.5,100,99,'a','abbazabba',-1,0]
size = random.choice(sizes)
# description
# snapshot_id
response = create_volume(self,
name="volume-%s-%s" % (self.id, self.volume_count),
volume_id=volume_id,
image_id=image_id,
bootable=bootable,
size=size)
print response.content
print '!'*80
self.volume_id = json.loads(response.content)['volume']['id']
self.volume_count += 1
self.rand_sleep()
else:
self.output('Volume already exists, not creating one:')
self.output("volume id: %s" % self.volume_id)
@task(2)
def cinder_delete_volume(self):
if self.volume_id:
delete_volume(self, self.volume_id)
# TODO - test response
self.volume_id = None
self.rand_sleep()
else:
self.cinder_create_volume()
@task(5)
def cinder_list_volumes(self):
list_volumes(self)
@task(5)
def cinder_list_volumes_detail(self):
list_volumes_detail(self)
@task(4)
def cinder_list_volume_detail(self):
list_volume_detail(self)
@task(1)
def nova_list_limits(self):
list_limits(self)
@task(1)
def keystone_get_auth(self):
self.auth_token, self.tenant_id, self.service_catalog = get_auth_token(self)
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait=500
max_wait=1000
| apache-2.0 |
grap/OpenUpgrade | addons/membership/membership.py | 9 | 28553 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
STATE = [
('none', 'Non Member'),
('canceled', 'Cancelled Member'),
('old', 'Old Member'),
('waiting', 'Waiting Member'),
('invoiced', 'Invoiced Member'),
('free', 'Free Member'),
('paid', 'Paid Member'),
]
STATE_PRIOR = {
'none': 0,
'canceled': 1,
'old': 2,
'waiting': 3,
'invoiced': 4,
'free': 6,
'paid': 7
}
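# STATE lists the selectable membership statuses; STATE_PRIOR ranks them
# numerically (higher = stronger standing), presumably so the strongest state
# can be picked when several apply.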
class membership_line(osv.osv):
'''Member line'''
def _get_partners(self, cr, uid, ids, context=None):
list_membership_line = []
member_line_obj = self.pool.get('membership.membership_line')
for partner in self.pool.get('res.partner').browse(cr, uid, ids, context=context):
if partner.member_lines:
list_membership_line += member_line_obj.search(cr, uid, [('id', 'in', [ l.id for l in partner.member_lines])], context=context)
return list_membership_line
def _get_membership_lines(self, cr, uid, ids, context=None):
list_membership_line = []
member_line_obj = self.pool.get('membership.membership_line')
for invoice in self.pool.get('account.invoice').browse(cr, uid, ids, context=context):
if invoice.invoice_line:
list_membership_line += member_line_obj.search(cr, uid, [('account_invoice_line', 'in', [ l.id for l in invoice.invoice_line])], context=context)
return list_membership_line
def _check_membership_date(self, cr, uid, ids, context=None):
"""Check if membership product is not in the past
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Membership Line IDs
@param context: A standard dictionary for contextual values
"""
cr.execute('''
SELECT MIN(ml.date_to - ai.date_invoice)
FROM membership_membership_line ml
JOIN account_invoice_line ail ON (
ml.account_invoice_line = ail.id
)
JOIN account_invoice ai ON (
ai.id = ail.invoice_id)
WHERE ml.id IN %s''', (tuple(ids),))
res = cr.fetchall()
for r in res:
if r[0] and r[0] < 0:
return False
return True
def _state(self, cr, uid, ids, name, args, context=None):
"""Compute the state lines
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Membership Line IDs
@param name: Field Name
@param context: A standard dictionary for contextual values
        @return: Dictionary mapping membership line ids to their state
"""
res = {}
inv_obj = self.pool.get('account.invoice')
for line in self.browse(cr, uid, ids, context=context):
cr.execute('''
SELECT i.state, i.id FROM
account_invoice i
WHERE
i.id = (
SELECT l.invoice_id FROM
account_invoice_line l WHERE
l.id = (
SELECT ml.account_invoice_line FROM
membership_membership_line ml WHERE
ml.id = %s
)
)
''', (line.id,))
fetched = cr.fetchone()
if not fetched:
res[line.id] = 'canceled'
continue
istate = fetched[0]
state = 'none'
            if istate == 'draft' or istate == 'proforma':
state = 'waiting'
elif istate == 'open':
state = 'invoiced'
elif istate == 'paid':
state = 'paid'
inv = inv_obj.browse(cr, uid, fetched[1], context=context)
for payment in inv.payment_ids:
if payment.invoice and payment.invoice.type == 'out_refund':
state = 'canceled'
elif istate == 'cancel':
state = 'canceled'
res[line.id] = state
return res
_description = __doc__
_name = 'membership.membership_line'
_columns = {
'partner': fields.many2one('res.partner', 'Partner', ondelete='cascade', select=1),
'membership_id': fields.many2one('product.product', string="Membership", required=True),
'date_from': fields.date('From', readonly=True),
'date_to': fields.date('To', readonly=True),
'date_cancel': fields.date('Cancel date'),
'date': fields.date('Join Date', help="Date on which member has joined the membership"),
'member_price': fields.float('Membership Fee', digits_compute= dp.get_precision('Product Price'), required=True, help='Amount for the membership'),
'account_invoice_line': fields.many2one('account.invoice.line', 'Account Invoice line', readonly=True),
'account_invoice_id': fields.related('account_invoice_line', 'invoice_id', type='many2one', relation='account.invoice', string='Invoice', readonly=True),
'state': fields.function(_state,
string='Membership Status', type='selection',
selection=STATE, store = {
'account.invoice': (_get_membership_lines, ['state'], 10),
'res.partner': (_get_partners, ['membership_state'], 12),
}, help="""It indicates the membership status.
-Non Member: A member who has not applied for any membership.
-Cancelled Member: A member who has cancelled his membership.
-Old Member: A member whose membership date has expired.
-Waiting Member: A member who has applied for the membership and whose invoice is going to be created.
-Invoiced Member: A member whose invoice has been created.
-Paid Member: A member who has paid the membership amount."""),
'company_id': fields.related('account_invoice_line', 'invoice_id', 'company_id', type="many2one", relation="res.company", string="Company", readonly=True, store=True)
}
_rec_name = 'partner'
_order = 'id desc'
_constraints = [
(_check_membership_date, 'Error, this membership product is out of date', [])
]
class Partner(osv.osv):
'''Partner'''
_inherit = 'res.partner'
def _get_partner_id(self, cr, uid, ids, context=None):
member_line_obj = self.pool.get('membership.membership_line')
res_obj = self.pool.get('res.partner')
data_inv = member_line_obj.browse(cr, uid, ids, context=context)
list_partner = []
for data in data_inv:
list_partner.append(data.partner.id)
ids2 = list_partner
while ids2:
ids2 = res_obj.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
list_partner += ids2
return list_partner
def _get_invoice_partner(self, cr, uid, ids, context=None):
inv_obj = self.pool.get('account.invoice')
res_obj = self.pool.get('res.partner')
data_inv = inv_obj.browse(cr, uid, ids, context=context)
list_partner = []
for data in data_inv:
list_partner.append(data.partner_id.id)
ids2 = list_partner
while ids2:
ids2 = res_obj.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
list_partner += ids2
return list_partner
def _cron_update_membership(self, cr, uid, context=None):
partner_ids = self.search(cr, uid, [('membership_state', '=', 'paid')], context=context)
if partner_ids:
self._store_set_values(cr, uid, partner_ids, ['membership_state'], context=context)
def _membership_state(self, cr, uid, ids, name, args, context=None):
"""This Function return Membership State For Given Partner.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Partner IDs
@param name: Field Name
@param context: A standard dictionary for contextual values
        @return: Dictionary mapping partner ids to their membership state
"""
res = {}
for id in ids:
res[id] = 'none'
today = time.strftime('%Y-%m-%d')
for id in ids:
partner_data = self.browse(cr, uid, id, context=context)
if partner_data.membership_cancel and today > partner_data.membership_cancel:
res[id] = 'free' if partner_data.free_member else 'canceled'
continue
if partner_data.membership_stop and today > partner_data.membership_stop:
res[id] = 'free' if partner_data.free_member else 'old'
continue
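            # 's' tracks the strongest status found so far:
            # 0=paid, 1=invoiced, 2=canceled, 3=waiting, 5=old, 6=none; 4 means undetermined.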
s = 4
if partner_data.member_lines:
for mline in partner_data.member_lines:
if mline.date_to >= today and mline.date_from <= today:
if mline.account_invoice_line and mline.account_invoice_line.invoice_id:
mstate = mline.account_invoice_line.invoice_id.state
if mstate == 'paid':
s = 0
inv = mline.account_invoice_line.invoice_id
for payment in inv.payment_ids:
if payment.invoice.type == 'out_refund':
s = 2
break
elif mstate == 'open' and s!=0:
s = 1
elif mstate == 'cancel' and s!=0 and s!=1:
s = 2
elif (mstate == 'draft' or mstate == 'proforma') and s!=0 and s!=1:
s = 3
if s==4:
for mline in partner_data.member_lines:
if mline.date_from < today and mline.date_to < today and mline.date_from <= mline.date_to and mline.account_invoice_line and mline.account_invoice_line.invoice_id.state == 'paid':
s = 5
else:
s = 6
if s==0:
res[id] = 'paid'
elif s==1:
res[id] = 'invoiced'
elif s==2:
res[id] = 'canceled'
elif s==3:
res[id] = 'waiting'
elif s==5:
res[id] = 'old'
elif s==6:
res[id] = 'none'
if partner_data.free_member and s!=0:
res[id] = 'free'
if partner_data.associate_member:
res_state = self._membership_state(cr, uid, [partner_data.associate_member.id], name, args, context=context)
res[id] = res_state[partner_data.associate_member.id]
return res
def _membership_date(self, cr, uid, ids, name, args, context=None):
"""Return date of membership"""
name = name[0]
res = {}
member_line_obj = self.pool.get('membership.membership_line')
for partner in self.browse(cr, uid, ids, context=context):
if partner.associate_member:
partner_id = partner.associate_member.id
else:
partner_id = partner.id
res[partner.id] = {
'membership_start': False,
'membership_stop': False,
'membership_cancel': False
}
if name == 'membership_start':
line_id = member_line_obj.search(cr, uid, [('partner', '=', partner_id),('date_cancel','=',False)],
limit=1, order='date_from', context=context)
if line_id:
res[partner.id]['membership_start'] = member_line_obj.read(cr, uid, [line_id[0]],
['date_from'], context=context)[0]['date_from']
if name == 'membership_stop':
line_id1 = member_line_obj.search(cr, uid, [('partner', '=', partner_id),('date_cancel','=',False)],
limit=1, order='date_to desc', context=context)
if line_id1:
res[partner.id]['membership_stop'] = member_line_obj.read(cr, uid, [line_id1[0]],
['date_to'], context=context)[0]['date_to']
if name == 'membership_cancel':
if partner.membership_state == 'canceled':
line_id2 = member_line_obj.search(cr, uid, [('partner', '=', partner.id)], limit=1, order='date_cancel', context=context)
if line_id2:
res[partner.id]['membership_cancel'] = member_line_obj.read(cr, uid, [line_id2[0]], ['date_cancel'], context=context)[0]['date_cancel']
return res
def _get_partners(self, cr, uid, ids, context=None):
ids2 = ids
while ids2:
ids2 = self.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
ids += ids2
return ids
def __get_membership_state(self, *args, **kwargs):
return self._membership_state(*args, **kwargs)
_columns = {
'associate_member': fields.many2one('res.partner', 'Associate Member',help="A member with whom you want to associate your membership.It will consider the membership state of the associated member."),
'member_lines': fields.one2many('membership.membership_line', 'partner', 'Membership'),
'free_member': fields.boolean('Free Member', help = "Select if you want to give free membership."),
'membership_amount': fields.float(
'Membership Amount', digits=(16, 2),
help = 'The price negotiated by the partner'),
'membership_state': fields.function(
__get_membership_state,
string = 'Current Membership Status', type = 'selection',
selection = STATE,
store = {
'account.invoice': (_get_invoice_partner, ['state'], 10),
'membership.membership_line': (_get_partner_id, ['state'], 10),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help='It indicates the membership state.\n'
'-Non Member: A partner who has not applied for any membership.\n'
'-Cancelled Member: A member who has cancelled his membership.\n'
'-Old Member: A member whose membership date has expired.\n'
'-Waiting Member: A member who has applied for the membership and whose invoice is going to be created.\n'
'-Invoiced Member: A member whose invoice has been created.\n'
'-Paying member: A member who has paid the membership fee.'),
'membership_start': fields.function(
            _membership_date, multi = 'membership_start',
string = 'Membership Start Date', type = 'date',
store = {
'account.invoice': (_get_invoice_partner, ['state'], 10),
'membership.membership_line': (_get_partner_id, ['state'], 10, ),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help="Date from which membership becomes active."),
'membership_stop': fields.function(
_membership_date,
string = 'Membership End Date', type='date', multi='membership_stop',
store = {
'account.invoice': (_get_invoice_partner, ['state'], 10),
'membership.membership_line': (_get_partner_id, ['state'], 10),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help="Date until which membership remains active."),
'membership_cancel': fields.function(
_membership_date,
string = 'Cancel Membership Date', type='date', multi='membership_cancel',
store = {
'account.invoice': (_get_invoice_partner, ['state'], 11),
'membership.membership_line': (_get_partner_id, ['state'], 10),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help="Date on which membership has been cancelled"),
}
_defaults = {
'free_member': False,
'membership_cancel': False,
}
def _check_recursion(self, cr, uid, ids, context=None):
"""Check Recursive for Associated Members.
"""
level = 100
while len(ids):
cr.execute('SELECT DISTINCT associate_member FROM res_partner WHERE id IN %s', (tuple(ids),))
ids = filter(None, map(lambda x:x[0], cr.fetchall()))
if not level:
return False
level -= 1
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive associated members.', ['associate_member'])
]
def create_membership_invoice(self, cr, uid, ids, product_id=None, datas=None, context=None):
""" Create Customer Invoice of Membership for partners.
        @param datas: dictionary holding the id of the membership product and the amount to invoice, e.g.
datas = {'membership_product_id': None, 'amount': None}
"""
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
invoice_tax_obj = self.pool.get('account.invoice.tax')
product_id = product_id or datas.get('membership_product_id', False)
amount = datas.get('amount', 0.0)
invoice_list = []
if type(ids) in (int, long,):
ids = [ids]
for partner in self.browse(cr, uid, ids, context=context):
account_id = partner.property_account_receivable and partner.property_account_receivable.id or False
fpos_id = partner.property_account_position and partner.property_account_position.id or False
addr = self.address_get(cr, uid, [partner.id], ['invoice'])
if partner.free_member:
raise osv.except_osv(_('Error!'),
_("Partner is a free Member."))
if not addr.get('invoice', False):
raise osv.except_osv(_('Error!'),
_("Partner doesn't have an address to make the invoice."))
quantity = 1
line_value = {
'product_id': product_id,
}
line_dict = invoice_line_obj.product_id_change(cr, uid, {},
product_id, False, quantity, '', 'out_invoice', partner.id, fpos_id, price_unit=amount, context=context)
line_value.update(line_dict['value'])
line_value['price_unit'] = amount
if line_value.get('invoice_line_tax_id', False):
tax_tab = [(6, 0, line_value['invoice_line_tax_id'])]
line_value['invoice_line_tax_id'] = tax_tab
invoice_id = invoice_obj.create(cr, uid, {
'partner_id': partner.id,
'account_id': account_id,
'fiscal_position': fpos_id or False
}, context=context)
line_value['invoice_id'] = invoice_id
invoice_line_obj.create(cr, uid, line_value, context=context)
invoice_list.append(invoice_id)
if line_value['invoice_line_tax_id']:
tax_value = invoice_tax_obj.compute(cr, uid, invoice_id).values()
for tax in tax_value:
invoice_tax_obj.create(cr, uid, tax, context=context)
#recompute the membership_state of those partners
self.pool.get('res.partner').write(cr, uid, ids, {})
return invoice_list
class Product(osv.osv):
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
model_obj = self.pool.get('ir.model.data')
if context is None:
context = {}
if ('product' in context) and (context['product']=='membership_product'):
model_data_ids_form = model_obj.search(cr, user, [('model','=','ir.ui.view'), ('name', 'in', ['membership_products_form', 'membership_products_tree'])], context=context)
resource_id_form = model_obj.read(cr, user, model_data_ids_form, fields=['res_id', 'name'], context=context)
dict_model = {}
for i in resource_id_form:
dict_model[i['name']] = i['res_id']
if view_type == 'form':
view_id = dict_model['membership_products_form']
else:
view_id = dict_model['membership_products_tree']
return super(Product,self).fields_view_get(cr, user, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
'''Product'''
_inherit = 'product.template'
_columns = {
'membership': fields.boolean('Membership', help='Check if the product is eligible for membership.'),
'membership_date_from': fields.date('Membership Start Date', help='Date from which membership becomes active.'),
'membership_date_to': fields.date('Membership End Date', help='Date until which membership remains active.'),
}
_sql_constraints = [('membership_date_greater','check(membership_date_to >= membership_date_from)','Error ! Ending Date cannot be set before Beginning Date.')]
_defaults = {
'membership': False,
}
class Invoice(osv.osv):
'''Invoice'''
_inherit = 'account.invoice'
def action_cancel(self, cr, uid, ids, context=None):
'''Create a 'date_cancel' on the membership_line object'''
member_line_obj = self.pool.get('membership.membership_line')
today = time.strftime('%Y-%m-%d')
for invoice in self.browse(cr, uid, ids, context=context):
mlines = member_line_obj.search(cr, uid,
[('account_invoice_line', 'in',
[l.id for l in invoice.invoice_line])])
member_line_obj.write(cr, uid, mlines, {'date_cancel': today})
return super(Invoice, self).action_cancel(cr, uid, ids, context=context)
# TODO master: replace by ondelete='cascade'
def unlink(self, cr, uid, ids, context=None):
member_line_obj = self.pool.get('membership.membership_line')
for invoice in self.browse(cr, uid, ids, context=context):
mlines = member_line_obj.search(cr, uid,
[('account_invoice_line', 'in',
[l.id for l in invoice.invoice_line])])
member_line_obj.unlink(cr, uid, mlines, context=context)
return super(Invoice, self).unlink(cr, uid, ids, context=context)
class account_invoice_line(osv.osv):
_inherit='account.invoice.line'
def write(self, cr, uid, ids, vals, context=None):
"""Overrides orm write method
"""
member_line_obj = self.pool.get('membership.membership_line')
res = super(account_invoice_line, self).write(cr, uid, ids, vals, context=context)
for line in self.browse(cr, uid, ids, context=context):
if line.invoice_id.type == 'out_invoice':
ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', line.id)], context=context)
if line.product_id and line.product_id.membership and not ml_ids:
# Product line has changed to a membership product
date_from = line.product_id.membership_date_from
date_to = line.product_id.membership_date_to
if line.invoice_id.date_invoice > date_from and line.invoice_id.date_invoice < date_to:
date_from = line.invoice_id.date_invoice
member_line_obj.create(cr, uid, {
'partner': line.invoice_id.partner_id.id,
'membership_id': line.product_id.id,
'member_price': line.price_unit,
'date': time.strftime('%Y-%m-%d'),
'date_from': date_from,
'date_to': date_to,
'account_invoice_line': line.id,
}, context=context)
if line.product_id and not line.product_id.membership and ml_ids:
# Product line has changed to a non membership product
member_line_obj.unlink(cr, uid, ml_ids, context=context)
return res
# TODO master: replace by ondelete='cascade'
def unlink(self, cr, uid, ids, context=None):
"""Remove Membership Line Record for Account Invoice Line
"""
member_line_obj = self.pool.get('membership.membership_line')
for id in ids:
ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', id)], context=context)
member_line_obj.unlink(cr, uid, ml_ids, context=context)
return super(account_invoice_line, self).unlink(cr, uid, ids, context=context)
def create(self, cr, uid, vals, context=None):
"""Overrides orm create method
"""
member_line_obj = self.pool.get('membership.membership_line')
result = super(account_invoice_line, self).create(cr, uid, vals, context=context)
line = self.browse(cr, uid, result, context=context)
if line.invoice_id.type == 'out_invoice':
ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', line.id)], context=context)
if line.product_id and line.product_id.membership and not ml_ids:
# Product line is a membership product
date_from = line.product_id.membership_date_from
date_to = line.product_id.membership_date_to
if line.invoice_id.date_invoice > date_from and line.invoice_id.date_invoice < date_to:
date_from = line.invoice_id.date_invoice
member_line_obj.create(cr, uid, {
'partner': line.invoice_id.partner_id and line.invoice_id.partner_id.id or False,
'membership_id': line.product_id.id,
'member_price': line.price_unit,
'date': time.strftime('%Y-%m-%d'),
'date_from': date_from,
'date_to': date_to,
'account_invoice_line': line.id,
}, context=context)
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
2014c2g5/2014c2 | exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/unittest/result.py | 727 | 6397 | """Test result object"""
import io
import sys
import traceback
from . import util
from functools import wraps
__unittest = True
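# Marker flag: frames whose globals contain __unittest can be filtered out of
# reported tracebacks (see _is_relevant_tb_level below, where this Brython port
# currently stubs out that check).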
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_testRunEntered = False
_moduleSetUpFailed = False
def __init__(self, stream=None, descriptions=None, verbosity=None):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
def printErrors(self):
"Called by TestRunner after test run"
#fixme brython
pass
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun += 1
self._mirrorOutput = False
self._setupStdout()
def _setupStdout(self):
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = io.StringIO()
self._stdout_buffer = io.StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"""Called when the given test has been run"""
self._restoreStdout()
self._mirrorOutput = False
def _restoreStdout(self):
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
def stopTestRun(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
@failfast
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
@failfast
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return len(self.failures) == len(self.errors) == 0
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def _is_relevant_tb_level(self, tb):
#fix me brython
#return '__unittest' in tb.tb_frame.f_globals
        return True  # for now, always return True; the '__unittest' globals check above is unavailable under Brython
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return ("<%s run=%i errors=%i failures=%i>" %
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures)))
| gpl-2.0 |
fperez/cython | tests/run/test_call.py | 17 | 3139 | import unittest
# The test cases here cover several paths through the function calling
# code. They depend on the METH_XXX flag that is used to define a C
# function, which can't be verified from Python. If the METH_XXX decl
# for a C function changes, these tests may not cover the right paths.
class CFunctionCalls(unittest.TestCase):
def test_varargs0(self):
self.assertRaises(TypeError, {}.__contains__)
def test_varargs1(self):
{}.__contains__(0)
def test_varargs2(self):
self.assertRaises(TypeError, {}.__contains__, 0, 1)
def test_varargs0_ext(self):
try:
{}.__contains__(*())
except TypeError:
pass
def test_varargs1_ext(self):
{}.__contains__(*(0,))
def test_varargs2_ext(self):
try:
{}.__contains__(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
def test_varargs0_kw(self):
self.assertRaises(TypeError, {}.__contains__, x=2)
def test_varargs1_kw(self):
self.assertRaises(TypeError, {}.__contains__, x=2)
def test_varargs2_kw(self):
self.assertRaises(TypeError, {}.__contains__, x=2, y=2)
def test_oldargs0_0(self):
{}.keys()
def test_oldargs0_1(self):
self.assertRaises(TypeError, {}.keys, 0)
def test_oldargs0_2(self):
self.assertRaises(TypeError, {}.keys, 0, 1)
def test_oldargs0_0_ext(self):
{}.keys(*())
def test_oldargs0_1_ext(self):
try:
{}.keys(*(0,))
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs0_2_ext(self):
try:
{}.keys(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
### Cython makes this a compile time error
# def test_oldargs0_0_kw(self):
# try:
# {}.keys(x=2)
# except TypeError:
# pass
# else:
# raise RuntimeError
def test_oldargs0_1_kw(self):
self.assertRaises(TypeError, {}.keys, x=2)
def test_oldargs0_2_kw(self):
self.assertRaises(TypeError, {}.keys, x=2, y=2)
def test_oldargs1_0(self):
self.assertRaises(TypeError, [].count)
def test_oldargs1_1(self):
[].count(1)
def test_oldargs1_2(self):
self.assertRaises(TypeError, [].count, 1, 2)
def test_oldargs1_0_ext(self):
try:
[].count(*())
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs1_1_ext(self):
[].count(*(1,))
def test_oldargs1_2_ext(self):
try:
[].count(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs1_0_kw(self):
self.assertRaises(TypeError, [].count, x=2)
def test_oldargs1_1_kw(self):
self.assertRaises(TypeError, [].count, {}, x=2)
def test_oldargs1_2_kw(self):
self.assertRaises(TypeError, [].count, x=2, y=2)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
brean/arduino-kivy-bluetooth | glue/protocols/__init__.py | 1 | 2145 | # -*- coding: utf-8 -*-
"""
provide classes for different connection protocols
(bluetooth, tcp/ip, ...)
"""
protocols = {}
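# Registry mapping protocol names ('bluetooth', 'serial') to socket classes;
# entries are only added below when the backing library imports successfully.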
class Protocol(object):
def __init__(self, name):
"""
basic protocol interface
"""
self.name = name
def write(self, data):
"""
write data to connected system
"""
return False
def read(self):
"""
read data from connected system
"""
return None
try:
import bluetooth
class BluetoothSocket(bluetooth.BluetoothSocket, Protocol):
def __init__(self, config):
self.config = config
self.name = config['name']
super(BluetoothSocket, self).__init__()
print (config['addr'], config['port'])
self.connect((config['addr'], config['port']))
def write(self, data):
"""
write data to system
:param data: data to send to the system
"""
self.send(data)
def read(self):
"""
read data from system
:return: received data
"""
return self.recv(numbytes=4096)
def inWaiting(self):
# XXX replace this with some real waiting state detection
return 0
protocols['bluetooth'] = BluetoothSocket
except ImportError as err:
bluetooth = None
print 'can not import bluetooth', err
try:
import serial
class SerialSocket(Protocol):
def __init__(self, config):
self.ser = serial.Serial(config['addr'], config['baudrate'])
super(SerialSocket, self).__init__(self.ser.name)
def write(self, data):
self.ser.write(data)
def inWaiting(self):
            # XXX replace this with some real waiting state detection
return 0
protocols['serial'] = SerialSocket
except ImportError as err:
    serial = None
    print 'can not import serial', err
#sock = BTFirmataSock(bluetooth.RFCOMM)
#sock.connect((bd_addr, port))
#print 'Connected to {}'.format(bd_addr)
#sock.settimeout(1.0)
#board = BTArduino(sock)
| mit |
infoxchange/lettuce | tests/integration/lib/Django-1.3/django/template/loaders/app_directories.py | 229 | 2764 | """
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""
import os
import sys
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
from django.utils.importlib import import_module
# At compile time, cache the directories to search.
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
app_template_dirs = []
for app in settings.INSTALLED_APPS:
try:
mod = import_module(app)
except ImportError, e:
raise ImproperlyConfigured('ImportError %s: %s' % (app, e.args[0]))
template_dir = os.path.join(os.path.dirname(mod.__file__), 'templates')
if os.path.isdir(template_dir):
app_template_dirs.append(template_dir.decode(fs_encoding))
# It won't change, so convert it to a tuple to save memory.
app_template_dirs = tuple(app_template_dirs)
class Loader(BaseLoader):
is_usable = True
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = app_template_dirs
for template_dir in template_dirs:
try:
yield safe_join(template_dir, template_name)
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of template_dir.
pass
def load_template_source(self, template_name, template_dirs=None):
for filepath in self.get_template_sources(template_name, template_dirs):
try:
file = open(filepath)
try:
return (file.read().decode(settings.FILE_CHARSET), filepath)
finally:
file.close()
except IOError:
pass
raise TemplateDoesNotExist(template_name)
_loader = Loader()
def load_template_source(template_name, template_dirs=None):
# For backwards compatibility
import warnings
warnings.warn(
"'django.template.loaders.app_directories.load_template_source' is deprecated; use 'django.template.loaders.app_directories.Loader' instead.",
DeprecationWarning
)
return _loader.load_template_source(template_name, template_dirs)
load_template_source.is_usable = True
| gpl-3.0 |
semgroup5-project/opendlv.scaledcars | thirdparty/cxxtest/doc/examples/test_examples.py | 50 | 2474 | #-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v3
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
# Imports
import pyutilib.th as unittest
import glob
import os
from os.path import dirname, abspath, basename
import sys
import re
currdir = dirname(abspath(__file__))+os.sep
datadir = currdir
compilerre = re.compile("^(?P<path>[^:]+)(?P<rest>:.*)$")
dirre = re.compile("^([^%s]*/)*" % re.escape(os.sep))
xmlre = re.compile("\"(?P<path>[^\"]*/[^\"]*)\"")
datere = re.compile("date=\"[^\"]*\"")
failure = re.compile("^(?P<prefix>.+)file=\"(?P<path>[^\"]+)\"(?P<suffix>.*)$")
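# These regexes scrub machine-specific details (directory prefixes, dates) from
# the captured output so it can be compared against the stored baseline files.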
#print "FOO", dirre
def filter(line):
# for xml, remove prefixes from everything that looks like a
# file path inside ""
line = xmlre.sub(
lambda match: '"'+re.sub("^[^/]+/", "", match.group(1))+'"',
line
)
# Remove date info
line = datere.sub( lambda match: 'date=""', line)
if 'Running' in line:
return False
if "IGNORE" in line:
return True
pathmatch = compilerre.match(line) # see if we can remove the basedir
failmatch = failure.match(line) # see if we can remove the basedir
#print "HERE", pathmatch, failmatch
if failmatch:
parts = failmatch.groupdict()
#print "X", parts
line = "%s file=\"%s\" %s" % (parts['prefix'], dirre.sub("", parts['path']), parts['suffix'])
elif pathmatch:
parts = pathmatch.groupdict()
#print "Y", parts
line = dirre.sub("", parts['path']) + parts['rest']
return line
# Declare an empty TestCase class
class Test(unittest.TestCase): pass
if not sys.platform.startswith('win'):
# Find all *.sh files, and use them to define baseline tests
for file in glob.glob(datadir+'*.sh'):
bname = basename(file)
name=bname.split('.')[0]
if os.path.exists(datadir+name+'.txt'):
Test.add_baseline_test(cwd=datadir, cmd=file, baseline=datadir+name+'.txt', name=name, filter=filter)
# Execute the tests
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
quantumlib/Cirq | examples/quantum_fourier_transform.py | 1 | 2702 | """
Creates and simulates a circuit for the Quantum Fourier Transform (QFT)
on a 4 qubit system.
In this example we demonstrate the Fourier transform of the
(1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0) vector. To do so, we prepare
the input state of the qubits as |0000>.
=== EXAMPLE OUTPUT ===
Circuit:
(0, 0): ─H───@^0.5───×───H────────────@^0.5─────×───H──────────@^0.5──×─H
│ │ │ │ │ │
(0, 1): ─────@───────×───@^0.25───×───@─────────×───@^0.25───×──@─────×──
│ │ │ │
(1, 0): ─────────────────┼────────┼───@^0.125───×───┼────────┼───────────
│ │ │ │ │ │
(1, 1): ─────────────────@────────×───@─────────×───@────────×───────────
FinalState
[0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j
0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j]
"""
import numpy as np
import cirq
def main():
"""Demonstrates Quantum Fourier transform."""
# Create circuit
qft_circuit = generate_2x2_grid_qft_circuit()
print('Circuit:')
print(qft_circuit)
# Simulate and collect final_state
simulator = cirq.Simulator()
result = simulator.simulate(qft_circuit)
print()
print('FinalState')
print(np.around(result.final_state_vector, 3))
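# Building block for the QFT below: a controlled-Z raised to the given fractional
# power, followed by a SWAP so later rotations act on neighbouring grid qubits.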
def _cz_and_swap(q0, q1, rot):
yield cirq.CZ(q0, q1) ** rot
yield cirq.SWAP(q0, q1)
# Create a quantum fourier transform circuit for 2*2 planar qubit architecture.
# Circuit is adapted from https://arxiv.org/pdf/quant-ph/0402196.pdf
def generate_2x2_grid_qft_circuit():
# Define a 2*2 square grid of qubits.
a, b, c, d = [
cirq.GridQubit(0, 0),
cirq.GridQubit(0, 1),
cirq.GridQubit(1, 1),
cirq.GridQubit(1, 0),
]
circuit = cirq.Circuit(
cirq.H(a),
_cz_and_swap(a, b, 0.5),
_cz_and_swap(b, c, 0.25),
_cz_and_swap(c, d, 0.125),
cirq.H(a),
_cz_and_swap(a, b, 0.5),
_cz_and_swap(b, c, 0.25),
cirq.H(a),
_cz_and_swap(a, b, 0.5),
cirq.H(a),
strategy=cirq.InsertStrategy.EARLIEST,
)
return circuit
if __name__ == '__main__':
main()
| apache-2.0 |
gangadharkadam/sher | erpnext/hr/report/employee_birthday/employee_birthday.py | 25 | 1331 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
def execute(filters=None):
if not filters: filters = {}
columns = get_columns()
data = get_employees(filters)
return columns, data
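# Frappe script reports return (columns, data); each column below is declared
# with the "Label:Fieldtype/Options:Width" string format.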
def get_columns():
return [
_("Employee") + ":Link/Employee:120", _("Name") + ":Data:200", _("Date of Birth")+ ":Date:100",
_("Branch") + ":Link/Branch:120", _("Department") + ":Link/Department:120",
_("Designation") + ":Link/Designation:120", _("Gender") + "::60", _("Company") + ":Link/Company:120"
]
def get_employees(filters):
conditions = get_conditions(filters)
return frappe.db.sql("""select name, employee_name, date_of_birth,
branch, department, designation,
gender, company from tabEmployee where status = 'Active' %s""" % conditions, as_list=1)
def get_conditions(filters):
conditions = ""
if filters.get("month"):
month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
"Dec"].index(filters["month"]) + 1
conditions += " and month(date_of_birth) = '%s'" % month
if filters.get("company"): conditions += " and company = '%s'" % \
filters["company"].replace("'", "\\'")
return conditions
| agpl-3.0 |
giggsey/SickRage | lib/sqlalchemy/dialects/drizzle/mysqldb.py | 154 | 1270 | """
.. dialect:: drizzle+mysqldb
:name: MySQL-Python
:dbapi: mysqldb
:connectstring: drizzle+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://sourceforge.net/projects/mysql-python
"""
from sqlalchemy.dialects.drizzle.base import (
DrizzleDialect,
DrizzleExecutionContext,
DrizzleCompiler,
DrizzleIdentifierPreparer)
from sqlalchemy.connectors.mysqldb import (
MySQLDBExecutionContext,
MySQLDBCompiler,
MySQLDBIdentifierPreparer,
MySQLDBConnector)
class DrizzleExecutionContext_mysqldb(MySQLDBExecutionContext,
DrizzleExecutionContext):
pass
class DrizzleCompiler_mysqldb(MySQLDBCompiler, DrizzleCompiler):
pass
class DrizzleIdentifierPreparer_mysqldb(MySQLDBIdentifierPreparer,
DrizzleIdentifierPreparer):
pass
class DrizzleDialect_mysqldb(MySQLDBConnector, DrizzleDialect):
execution_ctx_cls = DrizzleExecutionContext_mysqldb
statement_compiler = DrizzleCompiler_mysqldb
preparer = DrizzleIdentifierPreparer_mysqldb
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
return 'utf8'
dialect = DrizzleDialect_mysqldb
| gpl-3.0 |
TalShafir/ansible | lib/ansible/modules/cloud/scaleway/scaleway_sshkey.py | 75 | 4776 | #!/usr/bin/python
#
# Scaleway SSH keys management module
#
# Copyright (C) 2018 Online SAS.
# https://www.scaleway.com
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: scaleway_sshkey
short_description: Scaleway SSH keys management module
version_added: "2.6"
author: Remy Leone (@sieben)
description:
- This module manages SSH keys on Scaleway account
U(https://developer.scaleway.com)
extends_documentation_fragment: scaleway
options:
state:
description:
- Indicate desired state of the SSH key.
default: present
choices:
- present
- absent
ssh_pub_key:
description:
- The public SSH key as a string to add.
required: true
api_url:
description:
- Scaleway API URL
default: 'https://account.scaleway.com'
aliases: ['base_url']
'''
EXAMPLES = '''
- name: "Add SSH key"
scaleway_sshkey:
ssh_pub_key: "ssh-rsa AAAA..."
state: "present"
- name: "Delete SSH key"
scaleway_sshkey:
ssh_pub_key: "ssh-rsa AAAA..."
state: "absent"
- name: "Add SSH key with explicit token"
scaleway_sshkey:
ssh_pub_key: "ssh-rsa AAAA..."
state: "present"
oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c"
'''
RETURN = '''
data:
description: This is only present when C(state=present)
returned: when C(state=present)
type: dict
sample: {
"ssh_public_keys": [
{"key": "ssh-rsa AAAA...."}
]
}
'''
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.scaleway import scaleway_argument_spec, Scaleway
def extract_present_sshkeys(raw_organization_dict):
ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"]
ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list]
return ssh_key_lookup
def extract_user_id(raw_organization_dict):
return raw_organization_dict["organizations"][0]["users"][0]["id"]
def sshkey_user_patch(ssh_lookup):
ssh_list = {"ssh_public_keys": [{"key": key}
for key in ssh_lookup]}
return ssh_list
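# Build the PATCH body that replaces the user's full list of public keys; the
# same payload shape is used below both for adding and for removing a key.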
def core(module):
ssh_pub_key = module.params['ssh_pub_key']
state = module.params["state"]
account_api = Scaleway(module)
response = account_api.get('organizations')
status_code = response.status_code
organization_json = response.json
if not response.ok:
module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format(
status_code, response.json['message']))
user_id = extract_user_id(organization_json)
present_sshkeys = []
try:
present_sshkeys = extract_present_sshkeys(organization_json)
except (KeyError, IndexError) as e:
module.fail_json(changed=False, data="Error while extracting present SSH keys from API")
if state in ('present',):
if ssh_pub_key in present_sshkeys:
module.exit_json(changed=False)
# If key not found create it!
if module.check_mode:
module.exit_json(changed=True)
present_sshkeys.append(ssh_pub_key)
payload = sshkey_user_patch(present_sshkeys)
response = account_api.patch('/users/%s' % user_id, data=payload)
if response.ok:
module.exit_json(changed=True, data=response.json)
module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format(
response.status_code, response.json))
elif state in ('absent',):
if ssh_pub_key not in present_sshkeys:
module.exit_json(changed=False)
if module.check_mode:
module.exit_json(changed=True)
present_sshkeys.remove(ssh_pub_key)
payload = sshkey_user_patch(present_sshkeys)
response = account_api.patch('/users/%s' % user_id, data=payload)
if response.ok:
module.exit_json(changed=True, data=response.json)
module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format(
response.status_code, response.json))
def main():
argument_spec = scaleway_argument_spec()
argument_spec.update(dict(
state=dict(default='present', choices=['absent', 'present']),
ssh_pub_key=dict(required=True),
api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
core(module)
if __name__ == '__main__':
main()
| gpl-3.0 |
jrha/aquilon | tests/broker/test_update_cluster.py | 2 | 3584 | #!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the update cluster command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestUpdateCluster(TestBrokerCommand):
def test_100_updatenoop(self):
self.noouttest(["update_cluster", "--cluster=utgrid1",
"--down_hosts_threshold=2%"])
command = "show cluster --cluster utgrid1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Grid Cluster: utgrid1", command)
self.matchoutput(out, "Down Hosts Threshold: 0 (2%)", command)
self.matchoutput(out, "Maintenance Threshold: 0 (6%)", command)
def test_200_updateutgrid1(self):
command = ["update_cluster", "--cluster=utgrid1",
"--down_hosts_threshold=2"]
self.noouttest(command)
command = "show cluster --cluster utgrid1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Grid Cluster: utgrid1", command)
self.matchoutput(out, "Down Hosts Threshold: 2", command)
self.matchoutput(out, "Maintenance Threshold: 0 (6%)", command)
def test_300_update_maint_threshold(self):
command = ["update_cluster", "--cluster=utgrid1",
"--maint_threshold=50%"]
self.noouttest(command)
command = "show cluster --cluster utgrid1 --format proto"
out = self.commandtest(command.split(" "))
cluslist = self.parse_clusters_msg(out)
cluster = cluslist.clusters[0]
self.assertEqual(cluster.name, "utgrid1")
self.assertEqual(cluster.threshold, 2)
self.assertEqual(cluster.threshold_is_percent, False)
self.assertEqual(cluster.maint_threshold, 50)
self.assertEqual(cluster.maint_threshold_is_percent, True)
command = ["update_cluster", "--cluster=utgrid1",
"--maint_threshold=50"]
self.noouttest(command)
command = "show cluster --cluster utgrid1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Grid Cluster: utgrid1", command)
self.matchoutput(out, "Down Hosts Threshold: 2", command)
self.matchoutput(out, "Maintenance Threshold: 50", command)
command = ["update_cluster", "--cluster=utgrid1",
"--maint_threshold=0%"]
self.noouttest(command)
command = "show cluster --cluster utgrid1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Grid Cluster: utgrid1", command)
self.matchoutput(out, "Down Hosts Threshold: 2", command)
self.matchoutput(out, "Maintenance Threshold: 0 (0%)", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateCluster)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 |
zhaochao/fuel-web | tasklib/tasklib/tests/functional/test_run_exec.py | 4 | 2108 | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tasklib.tests import base
from tasklib.utils import STATUS
class TestFunctionalExecTasks(base.BaseFunctionalTest):
"""Each test will follow next pattern:
1. Run test with provided name - taskcmd -c conf.yaml run test/test
2. check status of task
"""
def test_simple_run(self):
exit_code, out, err = self.execute(['run', 'exec/simple'])
self.assertEqual(exit_code, 0)
exit_code, out, err = self.execute(['status', 'exec/simple'])
self.assertEqual(out.strip('\n'), STATUS.end.name)
self.assertEqual(exit_code, 0)
def test_failed_run(self):
exit_code, out, err = self.execute(['run', 'exec/fail'])
self.assertEqual(exit_code, 2)
exit_code, out, err = self.execute(['status', 'exec/fail'])
self.assertEqual(out.strip('\n'), STATUS.failed.name)
self.assertEqual(exit_code, 2)
def test_error(self):
exit_code, out, err = self.execute(['run', 'exec/error'])
self.assertEqual(exit_code, 3)
exit_code, out, err = self.execute(['status', 'exec/error'])
self.assertEqual(out.strip('\n'), STATUS.error.name)
self.assertEqual(exit_code, 3)
def test_notfound(self):
exit_code, out, err = self.execute(['run', 'exec/notfound'])
self.assertEqual(exit_code, 4)
exit_code, out, err = self.execute(['status', 'exec/notfound'])
self.assertEqual(out.strip('\n'), STATUS.notfound.name)
self.assertEqual(exit_code, 4)
| apache-2.0 |
hell03610/python-koans | python3/koans/about_regex.py | 34 | 4795 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import re
class AboutRegex(Koan):
"""
    These koans are based on Ben's book: Regular Expressions in 10 minutes.
    I found this book very useful, so I decided to write these koans in order to practice everything I learned from it.
http://www.forta.com/books/0672325667/
"""
def test_matching_literal_text(self):
"""
Lesson 1 Matching Literal String
"""
string = "Hello, my name is Felix and this koans are based on the Ben's book: Regular Expressions in 10 minutes."
m = re.search(__, string)
self.assertTrue(m and m.group(0) and m.group(0)== 'Felix', "I want my name")
def test_matching_literal_text_how_many(self):
"""
Lesson 1 How many matches?
        The default behaviour of most regular expression engines is to return just the first match.
        In Python you have the following options:
match() --> Determine if the RE matches at the beginning of the string.
search() --> Scan through a string, looking for any location where this RE matches.
findall() --> Find all substrings where the RE matches, and returns them as a list.
finditer() --> Find all substrings where the RE matches, and returns them as an iterator.
"""
string = "Hello, my name is Felix and this koans are based on the Ben's book: Regular Expressions in 10 minutes. Repeat My name is Felix"
        m = re.match('Felix', string) #TIP: maybe match() isn't the best option here
# I want to know how many times appears my name
self.assertEqual(m, __)
def test_matching_literal_text_not_case_sensitivity(self):
"""
        Lesson 1 Matching literal strings without case sensitivity.
        Most regex implementations also support matches that are not case sensitive. In Python you can use re.IGNORECASE; in
        JavaScript you can specify the optional i flag.
        Ben's book covers more languages.
"""
string = "Hello, my name is Felix or felix and this koans is based on the Ben's book: Regular Expressions in 10 minutes."
self.assertEqual(re.findall("felix", string, 20), __)
self.assertEqual(re.findall("felix", string, 10), __)
def test_matching_any_character(self):
"""
Lesson 1 Matching any character
        The . metacharacter matches any single character: letters, digits, and even the . itself.
"""
string = "pecks.xlx\n" \
+ "orders1.xls\n" \
+ "apec1.xls\n" \
+ "na1.xls\n" \
+ "na2.xls\n" \
+ "sa1.xls"
# TIP: remember the name of this lesson
        change_this_search_string = 'a..xlx' # <-- I want to find the na1, na2 and sa1 .xls files
        self.assertEqual(len(re.findall(change_this_search_string, string)), 3)
def test_matching_set_character(self):
"""
Lesson 2 Matching sets of characters
A set of characters is defined using the metacharacters [ and ]. Everything between them is part of the set and
any one of the set members must match (but not all).
"""
string = "sales.xlx\n" \
+ "sales1.xls\n" \
+ "orders3.xls\n" \
+ "apac1.xls\n" \
+ "sales2.xls\n" \
+ "na1.xls\n" \
+ "na2.xls\n" \
+ "sa1.xls\n" \
+ "ca1.xls"
        # I want to find all files for North America (na) or South America (sa), but not (ca)
        # TIP: you can use the pattern .a. as in the test above, but in this case it matches more than you want
change_this_search_string = '[nsc]a[2-9].xls'
        self.assertEqual(len(re.findall(change_this_search_string, string)), 3)
def test_anything_but_matching(self):
"""
Lesson 2 Using character set ranges
        Occasionally, you'll want a list of characters that you don't want to match.
Character sets can be negated using the ^ metacharacter.
"""
string = "sales.xlx\n" \
+ "sales1.xls\n" \
+ "orders3.xls\n" \
+ "apac1.xls\n" \
+ "sales2.xls\n" \
+ "sales3.xls\n" \
+ "europe2.xls\n" \
+ "sam.xls\n" \
+ "na1.xls\n" \
+ "na2.xls\n" \
+ "sa1.xls\n" \
+ "ca1.xls"
# I want to find the name sam
change_this_search_string = '[^nc]am'
        self.assertEqual(re.findall(change_this_search_string, string), ['sam.xls'])
| mit |
ogenstad/ansible | lib/ansible/plugins/lookup/subelements.py | 64 | 6146 | # (c) 2013, Serge van Ginderachter <[email protected]>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: subelements
author: Serge van Ginderachter <[email protected]>
version_added: "1.4"
short_description: traverse nested key from a list of dictionaries
description:
- Subelements walks a list of hashes (aka dictionaries) and then traverses a list with a given (nested sub-)key inside of those records.
options:
_terms:
description: tuple of list of dictionaries and dictionary key to extract
required: True
skip_missing:
default: False
description:
          - If set to True, the lookup plugin will skip the list items that do not contain the given subkey.
If False, the plugin will yield an error and complain about the missing subkey.
"""
EXAMPLES = """
- name: show var structure as it is needed for example to make sense
hosts: all
vars:
users:
- name: alice
authorized:
- /tmp/alice/onekey.pub
- /tmp/alice/twokey.pub
mysql:
password: mysql-password
hosts:
- "%"
- "127.0.0.1"
- "::1"
- "localhost"
privs:
- "*.*:SELECT"
- "DB1.*:ALL"
groups:
- wheel
- name: bob
authorized:
- /tmp/bob/id_rsa.pub
mysql:
password: other-mysql-password
hosts:
- "db1"
privs:
- "*.*:SELECT"
- "DB2.*:ALL"
tasks:
- name: Set authorized ssh key, extracting just that data from 'users'
authorized_key:
user: "{{ item.0.name }}"
key: "{{ lookup('file', item.1) }}"
with_subelements:
- "{{ users }}"
- authorized
- name: Setup MySQL users, given the mysql hosts and privs subkey lists
mysql_user:
name: "{{ item.0.name }}"
password: "{{ item.0.mysql.password }}"
host: "{{ item.1 }}"
priv: "{{ item.0.mysql.privs | join('/') }}"
with_subelements:
- "{{ users }}"
- mysql.hosts
- name: list groups for user that have them, dont error if they don't
debug: var=item
with_list: "{{lookup('subelements', users, 'groups', 'skip_missing=True')}}"
"""
RETURN = """
_list:
description: list of subelements extracted
"""
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
FLAGS = ('skip_missing',)
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
def _raise_terms_error(msg=""):
raise AnsibleError(
"subelements lookup expects a list of two or three items, " + msg)
terms[0] = listify_lookup_plugin_terms(terms[0], templar=self._templar, loader=self._loader)
# check lookup terms - check number of terms
if not isinstance(terms, list) or not 2 <= len(terms) <= 3:
_raise_terms_error()
# first term should be a list (or dict), second a string holding the subkey
if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], string_types):
_raise_terms_error("first a dict or a list, second a string pointing to the subkey")
subelements = terms[1].split(".")
if isinstance(terms[0], dict): # convert to list:
if terms[0].get('skipped', False) is not False:
# the registered result was completely skipped
return []
elementlist = []
for key in terms[0]:
elementlist.append(terms[0][key])
else:
elementlist = terms[0]
# check for optional flags in third term
flags = {}
if len(terms) == 3:
flags = terms[2]
if not isinstance(flags, dict) and not all([isinstance(key, string_types) and key in FLAGS for key in flags]):
_raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS)
# build_items
ret = []
for item0 in elementlist:
if not isinstance(item0, dict):
raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0)
if item0.get('skipped', False) is not False:
# this particular item is to be skipped
continue
skip_missing = boolean(flags.get('skip_missing', False), strict=False)
subvalue = item0
lastsubkey = False
sublist = []
for subkey in subelements:
if subkey == subelements[-1]:
lastsubkey = True
if subkey not in subvalue:
if skip_missing:
continue
else:
raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue))
if not lastsubkey:
if not isinstance(subvalue[subkey], dict):
if skip_missing:
continue
else:
raise AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey]))
else:
subvalue = subvalue[subkey]
else: # lastsubkey
if not isinstance(subvalue[subkey], list):
raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey]))
else:
sublist = subvalue.pop(subkey, [])
for item1 in sublist:
ret.append((item0, item1))
return ret
| gpl-3.0 |
andybab/Impala | tests/query_test/test_hbase_queries.py | 8 | 1079 | #!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Targeted Impala HBase Tests
#
import logging
import pytest
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
class TestHBaseQueries(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestHBaseQueries, cls).add_test_dimensions()
cls.TestMatrix.add_constraint(\
lambda v: v.get_value('table_format').file_format == 'hbase')
def test_hbase_scan_node(self, vector):
self.run_test_case('QueryTest/hbase-scan-node', vector)
def test_hbase_row_key(self, vector):
self.run_test_case('QueryTest/hbase-rowkeys', vector)
def test_hbase_filters(self, vector):
self.run_test_case('QueryTest/hbase-filters', vector)
def test_hbase_subquery(self, vector):
self.run_test_case('QueryTest/hbase-subquery', vector)
@pytest.mark.execute_serially
def test_hbase_inserts(self, vector):
self.run_test_case('QueryTest/hbase-inserts', vector)
| apache-2.0 |
nozuono/calibre-webserver | src/calibre/ebooks/pdb/ereader/writer.py | 24 | 10728 | # -*- coding: utf-8 -*-
'''
Write content to ereader pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <[email protected]>'
__docformat__ = 'restructuredtext en'
import re
import struct
import zlib
try:
from PIL import Image
Image
except ImportError:
import Image
import cStringIO
from calibre.ebooks.pdb.formatwriter import FormatWriter
from calibre.ebooks.pdb.header import PdbHeaderBuilder
from calibre.ebooks.pml.pmlml import PMLMLizer
IDENTITY = 'PNRdPPrs'
# This is an arbitrary number that is small enough to work. The actual maximum
# record size is unknown.
MAX_RECORD_SIZE = 8192
class Writer(FormatWriter):
def __init__(self, opts, log):
self.opts = opts
self.log = log
def write_content(self, oeb_book, out_stream, metadata=None):
pmlmlizer = PMLMLizer(self.log)
pml = unicode(pmlmlizer.extract_content(oeb_book, self.opts)).encode('cp1252', 'replace')
text, text_sizes = self._text(pml)
chapter_index = self._index_item(r'(?s)\\C(?P<val>[0-4])="(?P<text>.+?)"', pml)
chapter_index += self._index_item(r'(?s)\\X(?P<val>[0-4])(?P<text>.+?)\\X[0-4]', pml)
chapter_index += self._index_item(r'(?s)\\x(?P<text>.+?)\\x', pml)
link_index = self._index_item(r'(?s)\\Q="(?P<text>.+?)"', pml)
images = self._images(oeb_book.manifest, pmlmlizer.image_hrefs)
metadata = [self._metadata(metadata)]
hr = [self._header_record(len(text), len(chapter_index), len(link_index), len(images))]
'''
Record order as generated by Dropbook.
1. eReader Header
2. Compressed text
3. Small font page index
4. Large font page index
5. Chapter index
6. Links index
7. Images
        8. (Extrapolation: there should be one more record type here, though it is not yet known what it might be.)
9. Metadata
10. Sidebar records
11. Footnote records
12. Text block size record
13. "MeTaInFo\x00" word record
'''
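        # The sections list below mirrors that record order; image entries are
        # (header, data) tuples, which is why their lengths are summed separately
        # when the PDB header is built.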
sections = hr+text+chapter_index+link_index+images+metadata+[text_sizes]+['MeTaInFo\x00']
lengths = [len(i) if i not in images else len(i[0]) + len(i[1]) for i in sections]
pdbHeaderBuilder = PdbHeaderBuilder(IDENTITY, metadata[0].partition('\x00')[0])
pdbHeaderBuilder.build_header(lengths, out_stream)
for item in sections:
if item in images:
out_stream.write(item[0])
out_stream.write(item[1])
else:
out_stream.write(item)
def _text(self, pml):
pml_pages = []
text_sizes = ''
index = 0
while index < len(pml):
'''
Split on the space character closest to MAX_RECORD_SIZE when possible.
'''
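            # Each record is deflate-compressed with zlib, and its uncompressed
            # length is stored as a big-endian uint16 in the text-sizes record.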
split = pml.rfind(' ', index, MAX_RECORD_SIZE)
if split == -1:
len_end = len(pml[index:])
if len_end > MAX_RECORD_SIZE:
split = MAX_RECORD_SIZE
else:
split = len_end
if split == 0:
split = 1
pml_pages.append(zlib.compress(pml[index:index+split]))
text_sizes += struct.pack('>H', split)
index += split
return pml_pages, text_sizes
def _index_item(self, regex, pml):
index = []
for mo in re.finditer(regex, pml):
item = ''
if 'text' in mo.groupdict().keys():
item += struct.pack('>L', mo.start())
text = mo.group('text')
# Strip all PML tags from text
text = re.sub(r'\\U[0-9a-z]{4}', '', text)
text = re.sub(r'\\a\d{3}', '', text)
text = re.sub(r'\\.', '', text)
# Add appropriate spacing to denote the various levels of headings
if 'val' in mo.groupdict().keys():
text = '%s%s' % (' ' * 4 * int(mo.group('val')), text)
item += text
item += '\x00'
if item:
index.append(item)
return index
def _images(self, manifest, image_hrefs):
'''
Image format.
0-4 : 'PNG '. There must be a space after PNG.
4-36 : Image name. Must be exactly 32 bytes long. Pad with \x00 for names shorter than 32 bytes
36-58 : Unknown.
58-60 : Width.
60-62 : Height.
62-...: Raw image data in 8 bit PNG format.
'''
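        # For example (hypothetical values), a 100x50 image named "cover.png"
        # yields a 62-byte header: 'PNG ' + the name padded to 32 bytes with
        # \x00, further padding to offset 58, then struct.pack('>HH', 100, 50),
        # followed by the raw 8-bit PNG data.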
images = []
from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES
for item in manifest:
if item.media_type in OEB_RASTER_IMAGES and item.href in image_hrefs.keys():
try:
im = Image.open(cStringIO.StringIO(item.data)).convert('P')
im.thumbnail((300,300), Image.ANTIALIAS)
data = cStringIO.StringIO()
im.save(data, 'PNG')
data = data.getvalue()
header = 'PNG '
header += image_hrefs[item.href].ljust(32, '\x00')[:32]
header = header.ljust(58, '\x00')
header += struct.pack('>HH', im.size[0], im.size[1])
header = header.ljust(62, '\x00')
if len(data) + len(header) < 65505:
images.append((header, data))
except Exception as e:
                    self.log.error('Error: Could not include file %s because ' \
'%s.' % (item.href, e))
return images
def _metadata(self, metadata):
'''
Metadata takes the form:
title\x00
author\x00
copyright\x00
publisher\x00
isbn\x00
'''
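        # e.g. (hypothetical values) 'My Title\x00Jane Doe\x00\x00Acme Press\x00\x00'
        # when no copyright or ISBN metadata is available.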
title = _('Unknown')
author = _('Unknown')
copyright = ''
publisher = ''
isbn = ''
if metadata:
if len(metadata.title) >= 1:
title = metadata.title[0].value
if len(metadata.creator) >= 1:
from calibre.ebooks.metadata import authors_to_string
author = authors_to_string([x.value for x in metadata.creator])
if len(metadata.rights) >= 1:
copyright = metadata.rights[0].value
if len(metadata.publisher) >= 1:
publisher = metadata.publisher[0].value
return '%s\x00%s\x00%s\x00%s\x00%s\x00' % (title, author, copyright, publisher, isbn)
def _header_record(self, text_count, chapter_count, link_count, image_count):
'''
text_count = the number of text pages
image_count = the number of images
'''
compression = 10 # zlib compression.
non_text_offset = text_count + 1
chapter_offset = non_text_offset
link_offset = chapter_offset + chapter_count
if image_count > 0:
image_data_offset = link_offset + link_count
meta_data_offset = image_data_offset + image_count
last_data_offset = meta_data_offset + 1
else:
meta_data_offset = link_offset + link_count
last_data_offset = meta_data_offset + 1
image_data_offset = last_data_offset
if chapter_count == 0:
chapter_offset = last_data_offset
if link_count == 0:
link_offset = last_data_offset
record = ''
record += struct.pack('>H', compression) # [0:2] # Compression. Specifies compression and drm. 2 = palmdoc, 10 = zlib. 260 and 272 = DRM
record += struct.pack('>H', 0) # [2:4] # Unknown.
record += struct.pack('>H', 0) # [4:6] # Unknown.
record += struct.pack('>H', 25152) # [6:8] # 25152 is MAGIC. Somehow represents the cp1252 encoding of the text
record += struct.pack('>H', 0) # [8:10] # Number of small font pages. 0 if page index is not built.
record += struct.pack('>H', 0) # [10:12] # Number of large font pages. 0 if page index is not built.
record += struct.pack('>H', non_text_offset) # [12:14] # Non-Text record start.
record += struct.pack('>H', chapter_count) # [14:16] # Number of chapter index records.
record += struct.pack('>H', 0) # [16:18] # Number of small font page index records.
record += struct.pack('>H', 0) # [18:20] # Number of large font page index records.
record += struct.pack('>H', image_count) # [20:22] # Number of images.
record += struct.pack('>H', link_count) # [22:24] # Number of links.
record += struct.pack('>H', 1) # [24:26] # 1 if has metadata, 0 if not.
record += struct.pack('>H', 0) # [26:28] # Unknown.
record += struct.pack('>H', 0) # [28:30] # Number of Footnotes.
record += struct.pack('>H', 0) # [30:32] # Number of Sidebars.
record += struct.pack('>H', chapter_offset) # [32:34] # Chapter index offset.
record += struct.pack('>H', 2560) # [34:36] # 2560 is MAGIC.
record += struct.pack('>H', last_data_offset) # [36:38] # Small font page offset. This will be the last data offset if there are none.
record += struct.pack('>H', last_data_offset) # [38:40] # Large font page offset. This will be the last data offset if there are none.
record += struct.pack('>H', image_data_offset) # [40:42] # Image offset. This will be the last data offset if there are none.
record += struct.pack('>H', link_offset) # [42:44] # Links offset. This will be the last data offset if there are none.
record += struct.pack('>H', meta_data_offset) # [44:46] # Metadata offset. This will be the last data offset if there are none.
record += struct.pack('>H', 0) # [46:48] # Unknown.
record += struct.pack('>H', last_data_offset) # [48:50] # Footnote offset. This will be the last data offset if there are none.
record += struct.pack('>H', last_data_offset) # [50:52] # Sidebar offset. This will be the last data offset if there are none.
record += struct.pack('>H', last_data_offset) # [52:54] # Last data offset.
for i in range(54, 132, 2):
record += struct.pack('>H', 0) # [54:132]
return record
| gpl-3.0 |
lsqtongxin/django | tests/template_tests/test_extends.py | 154 | 7062 | import os
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.loader_tags import ExtendsError
from django.template.loaders.base import Loader
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango20Warning
from .utils import ROOT
RECURSIVE = os.path.join(ROOT, 'recursive_templates')
class ExtendsBehaviorTests(SimpleTestCase):
def test_normal_extend(self):
engine = Engine(dirs=[os.path.join(RECURSIVE, 'fs')])
template = engine.get_template('one.html')
output = template.render(Context({}))
self.assertEqual(output.strip(), 'three two one')
def test_extend_recursive(self):
engine = Engine(dirs=[
os.path.join(RECURSIVE, 'fs'),
os.path.join(RECURSIVE, 'fs2'),
os.path.join(RECURSIVE, 'fs3'),
])
template = engine.get_template('recursive.html')
output = template.render(Context({}))
self.assertEqual(output.strip(), 'fs3/recursive fs2/recursive fs/recursive')
def test_extend_missing(self):
engine = Engine(dirs=[os.path.join(RECURSIVE, 'fs')])
template = engine.get_template('extend-missing.html')
with self.assertRaises(TemplateDoesNotExist) as e:
template.render(Context({}))
tried = e.exception.tried
self.assertEqual(len(tried), 1)
self.assertEqual(tried[0][0].template_name, 'missing.html')
def test_recursive_multiple_loaders(self):
engine = Engine(
dirs=[os.path.join(RECURSIVE, 'fs')],
loaders=[
('django.template.loaders.locmem.Loader', {
'one.html': '{% extends "one.html" %}{% block content %}{{ block.super }} locmem-one{% endblock %}',
'two.html': '{% extends "two.html" %}{% block content %}{{ block.super }} locmem-two{% endblock %}',
'three.html': (
'{% extends "three.html" %}{% block content %}{{ block.super }} locmem-three{% endblock %}'
),
}),
'django.template.loaders.filesystem.Loader',
],
)
template = engine.get_template('one.html')
output = template.render(Context({}))
self.assertEqual(output.strip(), 'three locmem-three two locmem-two one locmem-one')
def test_extend_self_error(self):
"""
Catch if a template extends itself and no other matching
templates are found.
"""
engine = Engine(dirs=[os.path.join(RECURSIVE, 'fs')])
template = engine.get_template('self.html')
with self.assertRaises(TemplateDoesNotExist):
template.render(Context({}))
def test_extend_cached(self):
engine = Engine(
dirs=[
os.path.join(RECURSIVE, 'fs'),
os.path.join(RECURSIVE, 'fs2'),
os.path.join(RECURSIVE, 'fs3'),
],
loaders=[
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
]),
],
)
template = engine.get_template('recursive.html')
output = template.render(Context({}))
self.assertEqual(output.strip(), 'fs3/recursive fs2/recursive fs/recursive')
cache = engine.template_loaders[0].get_template_cache
self.assertEqual(len(cache), 3)
expected_path = os.path.join('fs', 'recursive.html')
self.assertTrue(cache['recursive.html'].origin.name.endswith(expected_path))
# Render another path that uses the same templates from the cache
template = engine.get_template('other-recursive.html')
output = template.render(Context({}))
self.assertEqual(output.strip(), 'fs3/recursive fs2/recursive fs/recursive')
# Template objects should not be duplicated.
self.assertEqual(len(cache), 4)
expected_path = os.path.join('fs', 'other-recursive.html')
self.assertTrue(cache['other-recursive.html'].origin.name.endswith(expected_path))
def test_unique_history_per_loader(self):
"""
Extending should continue even if two loaders return the same
name for a template.
"""
engine = Engine(
loaders=[
['django.template.loaders.locmem.Loader', {
'base.html': '{% extends "base.html" %}{% block content %}{{ block.super }} loader1{% endblock %}',
}],
['django.template.loaders.locmem.Loader', {
'base.html': '{% block content %}loader2{% endblock %}',
}],
]
)
template = engine.get_template('base.html')
output = template.render(Context({}))
self.assertEqual(output.strip(), 'loader2 loader1')
class NonRecursiveLoader(Loader):
def __init__(self, engine, templates_dict):
self.templates_dict = templates_dict
super(NonRecursiveLoader, self).__init__(engine)
def load_template_source(self, template_name, template_dirs=None):
try:
return self.templates_dict[template_name], template_name
except KeyError:
raise TemplateDoesNotExist(template_name)
@ignore_warnings(category=RemovedInDjango20Warning)
class NonRecursiveLoaderExtendsTests(SimpleTestCase):
loaders = [
('template_tests.test_extends.NonRecursiveLoader', {
'base.html': 'base',
'index.html': '{% extends "base.html" %}',
'recursive.html': '{% extends "recursive.html" %}',
'other-recursive.html': '{% extends "recursive.html" %}',
'a.html': '{% extends "b.html" %}',
'b.html': '{% extends "a.html" %}',
}),
]
def test_extend(self):
engine = Engine(loaders=self.loaders)
output = engine.render_to_string('index.html')
self.assertEqual(output, 'base')
def test_extend_cached(self):
engine = Engine(loaders=[
('django.template.loaders.cached.Loader', self.loaders),
])
output = engine.render_to_string('index.html')
self.assertEqual(output, 'base')
cache = engine.template_loaders[0].template_cache
self.assertTrue('base.html' in cache)
self.assertTrue('index.html' in cache)
# Render a second time from cache
output = engine.render_to_string('index.html')
self.assertEqual(output, 'base')
def test_extend_error(self):
engine = Engine(loaders=self.loaders)
msg = 'Cannot extend templates recursively when using non-recursive template loaders'
with self.assertRaisesMessage(ExtendsError, msg):
engine.render_to_string('recursive.html')
with self.assertRaisesMessage(ExtendsError, msg):
engine.render_to_string('other-recursive.html')
with self.assertRaisesMessage(ExtendsError, msg):
engine.render_to_string('a.html')
| bsd-3-clause |
lsqtongxin/django | django/utils/log.py | 116 | 5216 | from __future__ import unicode_literals
import logging
import logging.config # needed when logging_config doesn't start with logging.config
import sys
import warnings
from copy import copy
from django.conf import settings
from django.core import mail
from django.core.mail import get_connection
from django.utils.deprecation import RemovedInNextVersionWarning
from django.utils.module_loading import import_string
from django.views.debug import ExceptionReporter
# Default logging for Django. This sends an email to the site admins on every
# HTTP 500 error. Depending on DEBUG, all other log records are either sent to
# the console (DEBUG=True) or discarded by means of the NullHandler (DEBUG=False).
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console', 'mail_admins'],
},
'py.warnings': {
'handlers': ['console'],
},
}
}
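# Projects extend or replace this via the LOGGING setting, which is passed to
# configure_logging() below. A minimal, hypothetical example that also routes
# django.request errors to a file:
#
#     LOGGING = {
#         'version': 1,
#         'disable_existing_loggers': False,
#         'handlers': {
#             'file': {
#                 'level': 'ERROR',
#                 'class': 'logging.FileHandler',
#                 'filename': '/var/log/django/error.log',
#             },
#         },
#         'loggers': {
#             'django.request': {
#                 'handlers': ['file'],
#                 'level': 'ERROR',
#             },
#         },
#     }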
def configure_logging(logging_config, logging_settings):
if not sys.warnoptions:
# Route warnings through python logging
logging.captureWarnings(True)
# RemovedInNextVersionWarning is a subclass of DeprecationWarning which
# is hidden by default, hence we force the "default" behavior
warnings.simplefilter("default", RemovedInNextVersionWarning)
if logging_config:
# First find the logging configuration function ...
logging_config_func = import_string(logging_config)
logging.config.dictConfig(DEFAULT_LOGGING)
# ... then invoke it with the logging settings
if logging_settings:
logging_config_func(logging_settings)
class AdminEmailHandler(logging.Handler):
"""An exception log handler that emails log entries to site admins.
If the request is passed as the first argument to the log record,
request data will be provided in the email report.
"""
def __init__(self, include_html=False, email_backend=None):
logging.Handler.__init__(self)
self.include_html = include_html
self.email_backend = email_backend
def emit(self, record):
try:
request = record.request
subject = '%s (%s IP): %s' % (
record.levelname,
('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
else 'EXTERNAL'),
record.getMessage()
)
except Exception:
subject = '%s: %s' % (
record.levelname,
record.getMessage()
)
request = None
subject = self.format_subject(subject)
# Since we add a nicely formatted traceback on our own, create a copy
# of the log record without the exception data.
no_exc_record = copy(record)
no_exc_record.exc_info = None
no_exc_record.exc_text = None
if record.exc_info:
exc_info = record.exc_info
else:
exc_info = (None, record.getMessage(), None)
reporter = ExceptionReporter(request, is_email=True, *exc_info)
message = "%s\n\n%s" % (self.format(no_exc_record), reporter.get_traceback_text())
html_message = reporter.get_traceback_html() if self.include_html else None
self.send_mail(subject, message, fail_silently=True, html_message=html_message)
def send_mail(self, subject, message, *args, **kwargs):
mail.mail_admins(subject, message, *args, connection=self.connection(), **kwargs)
def connection(self):
return get_connection(backend=self.email_backend, fail_silently=True)
def format_subject(self, subject):
"""
Escape CR and LF characters, and limit length.
RFC 2822's hard limit is 998 characters per line. So, minus "Subject: "
the actual subject must be no longer than 989 characters.
"""
formatted_subject = subject.replace('\n', '\\n').replace('\r', '\\r')
return formatted_subject[:989]
class CallbackFilter(logging.Filter):
"""
A logging filter that checks the return value of a given callable (which
takes the record-to-be-logged as its only parameter) to decide whether to
log a record.
"""
def __init__(self, callback):
self.callback = callback
def filter(self, record):
if self.callback(record):
return 1
return 0
class RequireDebugFalse(logging.Filter):
def filter(self, record):
return not settings.DEBUG
class RequireDebugTrue(logging.Filter):
def filter(self, record):
return settings.DEBUG
| bsd-3-clause |
mchdks/python-social-auth | social/backends/soundcloud.py | 83 | 2156 | """
Soundcloud OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/soundcloud.html
"""
from social.p3 import urlencode
from social.backends.oauth import BaseOAuth2
class SoundcloudOAuth2(BaseOAuth2):
"""Soundcloud OAuth authentication backend"""
name = 'soundcloud'
AUTHORIZATION_URL = 'https://soundcloud.com/connect'
ACCESS_TOKEN_URL = 'https://api.soundcloud.com/oauth2/token'
ACCESS_TOKEN_METHOD = 'POST'
SCOPE_SEPARATOR = ','
REDIRECT_STATE = False
EXTRA_DATA = [
('id', 'id'),
('refresh_token', 'refresh_token'),
('expires', 'expires')
]
def get_user_details(self, response):
"""Return user details from Soundcloud account"""
fullname, first_name, last_name = self.get_user_names(
response.get('full_name')
)
return {'username': response.get('username'),
'email': response.get('email') or '',
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json('https://api.soundcloud.com/me.json',
params={'oauth_token': access_token})
def auth_url(self):
"""Return redirect url"""
state = None
if self.STATE_PARAMETER or self.REDIRECT_STATE:
# Store state in session for further request validation. The state
# value is passed as state parameter (as specified in OAuth2 spec),
# but also added to redirect_uri, that way we can still verify the
# request if the provider doesn't implement the state parameter.
# Reuse token if any.
name = self.name + '_state'
state = self.strategy.session_get(name) or self.state_token()
self.strategy.session_set(name, state)
params = self.auth_params(state)
params.update(self.get_scope_argument())
params.update(self.auth_extra_arguments())
return self.AUTHORIZATION_URL + '?' + urlencode(params)
| bsd-3-clause |
mcgee/ns-3 | doc/manual/source/conf.py | 75 | 7047 | # -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3'
copyright = u'2010, ns-3 project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = 'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'ns3_html_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../..']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Manual'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ns-3-manual.tex', u'ns-3 Manual',
u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '../../ns3_html_theme/static/ns-3.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'\usepackage{amssymb}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-manual', u'ns-3 Manual',
[u'ns-3 project'], 1)
]
| gpl-2.0 |
ardumont/pygit2 | test/test_repository.py | 1 | 21901 | # -*- coding: UTF-8 -*-
#
# Copyright 2010-2014 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Tests for Repository objects."""
# Import from the future
from __future__ import absolute_import
from __future__ import unicode_literals
# Import from the Standard Library
import binascii
import unittest
import tempfile
import os
from os.path import join, realpath
import sys
# Import from pygit2
from pygit2 import GIT_OBJ_ANY, GIT_OBJ_BLOB, GIT_OBJ_COMMIT
from pygit2 import init_repository, clone_repository, discover_repository
from pygit2 import Oid, Reference, hashfile
import pygit2
from . import utils
try:
import __pypy__
except ImportError:
__pypy__ = None
HEAD_SHA = '784855caf26449a1914d2cf62d12b9374d76ae78'
PARENT_SHA = 'f5e5aa4e36ab0fe62ee1ccc6eb8f79b866863b87' # HEAD^
BLOB_HEX = 'af431f20fc541ed6d5afede3e2dc7160f6f01f16'
BLOB_RAW = binascii.unhexlify(BLOB_HEX.encode('ascii'))
BLOB_OID = Oid(raw=BLOB_RAW)
class RepositoryTest(utils.BareRepoTestCase):
def test_is_empty(self):
self.assertFalse(self.repo.is_empty)
def test_is_bare(self):
self.assertTrue(self.repo.is_bare)
def test_head(self):
head = self.repo.head
self.assertEqual(HEAD_SHA, head.target.hex)
self.assertEqual(type(head), Reference)
self.assertFalse(self.repo.head_is_unborn)
self.assertFalse(self.repo.head_is_detached)
def test_set_head(self):
        # Test setting a detached HEAD.
self.repo.set_head(Oid(hex=PARENT_SHA))
self.assertEqual(self.repo.head.target.hex, PARENT_SHA)
# And test setting a normal HEAD.
self.repo.set_head("refs/heads/master")
self.assertEqual(self.repo.head.name, "refs/heads/master")
self.assertEqual(self.repo.head.target.hex, HEAD_SHA)
def test_read(self):
self.assertRaises(TypeError, self.repo.read, 123)
self.assertRaisesWithArg(KeyError, '1' * 40, self.repo.read, '1' * 40)
ab = self.repo.read(BLOB_OID)
a = self.repo.read(BLOB_HEX)
self.assertEqual(ab, a)
self.assertEqual((GIT_OBJ_BLOB, b'a contents\n'), a)
a2 = self.repo.read('7f129fd57e31e935c6d60a0c794efe4e6927664b')
self.assertEqual((GIT_OBJ_BLOB, b'a contents 2\n'), a2)
a_hex_prefix = BLOB_HEX[:4]
a3 = self.repo.read(a_hex_prefix)
self.assertEqual((GIT_OBJ_BLOB, b'a contents\n'), a3)
def test_write(self):
data = b"hello world"
# invalid object type
self.assertRaises(ValueError, self.repo.write, GIT_OBJ_ANY, data)
oid = self.repo.write(GIT_OBJ_BLOB, data)
self.assertEqual(type(oid), Oid)
def test_contains(self):
self.assertRaises(TypeError, lambda: 123 in self.repo)
self.assertTrue(BLOB_OID in self.repo)
self.assertTrue(BLOB_HEX in self.repo)
self.assertTrue(BLOB_HEX[:10] in self.repo)
self.assertFalse('a' * 40 in self.repo)
self.assertFalse('a' * 20 in self.repo)
def test_iterable(self):
l = [obj for obj in self.repo]
oid = Oid(hex=BLOB_HEX)
self.assertTrue(oid in l)
def test_lookup_blob(self):
self.assertRaises(TypeError, lambda: self.repo[123])
self.assertEqual(self.repo[BLOB_OID].hex, BLOB_HEX)
a = self.repo[BLOB_HEX]
self.assertEqual(b'a contents\n', a.read_raw())
self.assertEqual(BLOB_HEX, a.hex)
self.assertEqual(GIT_OBJ_BLOB, a.type)
def test_lookup_blob_prefix(self):
a = self.repo[BLOB_HEX[:5]]
self.assertEqual(b'a contents\n', a.read_raw())
self.assertEqual(BLOB_HEX, a.hex)
self.assertEqual(GIT_OBJ_BLOB, a.type)
def test_lookup_commit(self):
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
commit = self.repo[commit_sha]
self.assertEqual(commit_sha, commit.hex)
self.assertEqual(GIT_OBJ_COMMIT, commit.type)
self.assertEqual(('Second test data commit.\n\n'
'This commit has some additional text.\n'),
commit.message)
def test_lookup_commit_prefix(self):
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
commit_sha_prefix = commit_sha[:7]
too_short_prefix = commit_sha[:3]
commit = self.repo[commit_sha_prefix]
self.assertEqual(commit_sha, commit.hex)
self.assertEqual(GIT_OBJ_COMMIT, commit.type)
self.assertEqual(
('Second test data commit.\n\n'
'This commit has some additional text.\n'),
commit.message)
self.assertRaises(ValueError, self.repo.__getitem__, too_short_prefix)
def test_expand_id(self):
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
expanded = self.repo.expand_id(commit_sha[:7])
self.assertEqual(commit_sha, expanded.hex)
@unittest.skipIf(__pypy__ is not None, "skip refcounts checks in pypy")
def test_lookup_commit_refcount(self):
start = sys.getrefcount(self.repo)
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
commit = self.repo[commit_sha]
del commit
end = sys.getrefcount(self.repo)
self.assertEqual(start, end)
def test_get_path(self):
directory = realpath(self.repo.path)
expected = realpath(self.repo_path)
self.assertEqual(directory, expected)
def test_get_workdir(self):
self.assertEqual(self.repo.workdir, None)
def test_revparse_single(self):
parent = self.repo.revparse_single('HEAD^')
self.assertEqual(parent.hex, PARENT_SHA)
def test_hash(self):
data = "foobarbaz"
hashed_sha1 = pygit2.hash(data)
written_sha1 = self.repo.create_blob(data)
self.assertEqual(hashed_sha1, written_sha1)
def test_hashfile(self):
data = "bazbarfoo"
tempfile_path = tempfile.mkstemp()[1]
with open(tempfile_path, 'w') as fh:
fh.write(data)
hashed_sha1 = hashfile(tempfile_path)
os.unlink(tempfile_path)
written_sha1 = self.repo.create_blob(data)
self.assertEqual(hashed_sha1, written_sha1)
def test_conflicts_in_bare_repository(self):
def create_conflict_file(repo, branch, content):
oid = repo.create_blob(content.encode('utf-8'))
tb = repo.TreeBuilder()
tb.insert('conflict', oid, pygit2.GIT_FILEMODE_BLOB)
tree = tb.write()
sig = pygit2.Signature('Author', '[email protected]')
commit = repo.create_commit(branch.name, sig, sig,
'Conflict', tree, [branch.target])
self.assertIsNotNone(commit)
return commit
b1 = self.repo.create_branch('b1', self.repo.head.peel())
c1 = create_conflict_file(self.repo, b1, 'ASCII - abc')
b2 = self.repo.create_branch('b2', self.repo.head.peel())
c2 = create_conflict_file(self.repo, b2, 'Unicode - äüö')
index = self.repo.merge_commits(c1, c2)
self.assertIsNotNone(index.conflicts)
# ConflictCollection does not allow calling len(...) on it directly so
# we have to calculate length by iterating over its entries
self.assertEqual(sum(1 for _ in index.conflicts), 1)
(a, t, o) = index.conflicts['conflict']
diff = self.repo.merge_file_from_index(a, t, o)
self.assertEqual(diff, '''<<<<<<< conflict
ASCII - abc
=======
Unicode - äüö
>>>>>>> conflict
''')
class RepositoryTest_II(utils.RepoTestCase):
def test_is_empty(self):
self.assertFalse(self.repo.is_empty)
def test_is_bare(self):
self.assertFalse(self.repo.is_bare)
def test_get_path(self):
directory = realpath(self.repo.path)
expected = realpath(join(self.repo_path, '.git'))
self.assertEqual(directory, expected)
def test_get_workdir(self):
directory = realpath(self.repo.workdir)
expected = realpath(self.repo_path)
self.assertEqual(directory, expected)
def test_set_workdir(self):
directory = tempfile.mkdtemp()
self.repo.workdir = directory
self.assertEqual(realpath(self.repo.workdir), realpath(directory))
def test_checkout_ref(self):
ref_i18n = self.repo.lookup_reference('refs/heads/i18n')
# checkout i18n with conflicts and default strategy should
# not be possible
self.assertRaises(pygit2.GitError, self.repo.checkout, ref_i18n)
# checkout i18n with GIT_CHECKOUT_FORCE
head = self.repo.head
head = self.repo[head.target]
self.assertTrue('new' not in head.tree)
self.repo.checkout(ref_i18n, strategy=pygit2.GIT_CHECKOUT_FORCE)
head = self.repo.head
head = self.repo[head.target]
self.assertEqual(head.hex, ref_i18n.target.hex)
self.assertTrue('new' in head.tree)
self.assertTrue('bye.txt' not in self.repo.status())
def test_checkout_branch(self):
branch_i18n = self.repo.lookup_branch('i18n')
# checkout i18n with conflicts and default strategy should
# not be possible
self.assertRaises(pygit2.GitError, self.repo.checkout, branch_i18n)
# checkout i18n with GIT_CHECKOUT_FORCE
head = self.repo.head
head = self.repo[head.target]
self.assertTrue('new' not in head.tree)
self.repo.checkout(branch_i18n, strategy=pygit2.GIT_CHECKOUT_FORCE)
head = self.repo.head
head = self.repo[head.target]
self.assertEqual(head.hex, branch_i18n.target.hex)
self.assertTrue('new' in head.tree)
self.assertTrue('bye.txt' not in self.repo.status())
def test_checkout_index(self):
# some changes to working dir
with open(os.path.join(self.repo.workdir, 'hello.txt'), 'w') as f:
f.write('new content')
# checkout index
self.assertTrue('hello.txt' in self.repo.status())
self.repo.checkout(strategy=pygit2.GIT_CHECKOUT_FORCE)
self.assertTrue('hello.txt' not in self.repo.status())
def test_checkout_head(self):
# some changes to the index
with open(os.path.join(self.repo.workdir, 'bye.txt'), 'w') as f:
f.write('new content')
self.repo.index.add('bye.txt')
# checkout from index should not change anything
self.assertTrue('bye.txt' in self.repo.status())
self.repo.checkout(strategy=pygit2.GIT_CHECKOUT_FORCE)
self.assertTrue('bye.txt' in self.repo.status())
# checkout from head will reset index as well
self.repo.checkout('HEAD', strategy=pygit2.GIT_CHECKOUT_FORCE)
self.assertTrue('bye.txt' not in self.repo.status())
def test_checkout_alternative_dir(self):
ref_i18n = self.repo.lookup_reference('refs/heads/i18n')
extra_dir = os.path.join(self.repo.workdir, 'extra-dir')
os.mkdir(extra_dir)
self.assertTrue(len(os.listdir(extra_dir)) == 0)
self.repo.checkout(ref_i18n, directory=extra_dir)
self.assertFalse(len(os.listdir(extra_dir)) == 0)
def test_merge_base(self):
commit = self.repo.merge_base(
'5ebeeebb320790caf276b9fc8b24546d63316533',
'4ec4389a8068641da2d6578db0419484972284c8')
self.assertEqual(commit.hex,
'acecd5ea2924a4b900e7e149496e1f4b57976e51')
# Create a commit without any merge base to any other
sig = pygit2.Signature("me", "[email protected]")
indep = self.repo.create_commit(None, sig, sig, "a new root commit",
self.repo[commit].peel(pygit2.Tree).id, [])
self.assertEqual(None, self.repo.merge_base(indep, commit))
def test_ahead_behind(self):
ahead, behind = self.repo.ahead_behind('5ebeeebb320790caf276b9fc8b24546d63316533',
'4ec4389a8068641da2d6578db0419484972284c8')
self.assertEqual(1, ahead)
self.assertEqual(2, behind)
ahead, behind = self.repo.ahead_behind('4ec4389a8068641da2d6578db0419484972284c8',
'5ebeeebb320790caf276b9fc8b24546d63316533')
self.assertEqual(2, ahead)
self.assertEqual(1, behind)
def test_reset_hard(self):
ref = "5ebeeebb320790caf276b9fc8b24546d63316533"
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
self.repo.reset(
ref,
pygit2.GIT_RESET_HARD)
self.assertEqual(self.repo.head.target.hex, ref)
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
#Hard reset will reset the working copy too
self.assertFalse("hola mundo\n" in lines)
self.assertFalse("bonjour le monde\n" in lines)
def test_reset_soft(self):
ref = "5ebeeebb320790caf276b9fc8b24546d63316533"
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
self.repo.reset(
ref,
pygit2.GIT_RESET_SOFT)
self.assertEqual(self.repo.head.target.hex, ref)
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
#Soft reset will not reset the working copy
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
#soft reset will keep changes in the index
diff = self.repo.diff(cached=True)
self.assertRaises(KeyError, lambda: diff[0])
def test_reset_mixed(self):
ref = "5ebeeebb320790caf276b9fc8b24546d63316533"
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
self.repo.reset(
ref,
pygit2.GIT_RESET_MIXED)
self.assertEqual(self.repo.head.target.hex, ref)
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
#mixed reset will not reset the working copy
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
#mixed reset will set the index to match working copy
diff = self.repo.diff(cached=True)
self.assertTrue("hola mundo\n" in diff.patch)
self.assertTrue("bonjour le monde\n" in diff.patch)
class RepositorySignatureTest(utils.RepoTestCase):
def test_default_signature(self):
config = self.repo.config
config['user.name'] = 'Random J Hacker'
config['user.email'] ='[email protected]'
sig = self.repo.default_signature
self.assertEqual('Random J Hacker', sig.name)
self.assertEqual('[email protected]', sig.email)
class NewRepositoryTest(utils.NoRepoTestCase):
def test_new_repo(self):
repo = init_repository(self._temp_dir, False)
oid = repo.write(GIT_OBJ_BLOB, "Test")
self.assertEqual(type(oid), Oid)
assert os.path.exists(os.path.join(self._temp_dir, '.git'))
class InitRepositoryTest(utils.NoRepoTestCase):
# under the assumption that repo.is_bare works
def test_no_arg(self):
repo = init_repository(self._temp_dir)
self.assertFalse(repo.is_bare)
def test_pos_arg_false(self):
repo = init_repository(self._temp_dir, False)
self.assertFalse(repo.is_bare)
def test_pos_arg_true(self):
repo = init_repository(self._temp_dir, True)
self.assertTrue(repo.is_bare)
def test_keyword_arg_false(self):
repo = init_repository(self._temp_dir, bare=False)
self.assertFalse(repo.is_bare)
def test_keyword_arg_true(self):
repo = init_repository(self._temp_dir, bare=True)
self.assertTrue(repo.is_bare)
class DiscoverRepositoryTest(utils.NoRepoTestCase):
def test_discover_repo(self):
repo = init_repository(self._temp_dir, False)
subdir = os.path.join(self._temp_dir, "test1", "test2")
os.makedirs(subdir)
self.assertEqual(repo.path, discover_repository(subdir))
class EmptyRepositoryTest(utils.EmptyRepoTestCase):
def test_is_empty(self):
self.assertTrue(self.repo.is_empty)
    def test_is_bare(self):
self.assertFalse(self.repo.is_bare)
def test_head(self):
self.assertTrue(self.repo.head_is_unborn)
self.assertFalse(self.repo.head_is_detached)
class CloneRepositoryTest(utils.NoRepoTestCase):
def test_clone_repository(self):
repo_path = "./test/data/testrepo.git/"
repo = clone_repository(repo_path, self._temp_dir)
self.assertFalse(repo.is_empty)
self.assertFalse(repo.is_bare)
def test_clone_bare_repository(self):
repo_path = "./test/data/testrepo.git/"
repo = clone_repository(repo_path, self._temp_dir, bare=True)
self.assertFalse(repo.is_empty)
self.assertTrue(repo.is_bare)
def test_clone_repository_and_remote_callbacks(self):
src_repo_relpath = "./test/data/testrepo.git/"
repo_path = os.path.join(self._temp_dir, "clone-into")
url = 'file://' + os.path.realpath(src_repo_relpath)
def create_repository(path, bare):
return init_repository(path, bare)
# here we override the name
def create_remote(repo, name, url):
return repo.remotes.create("custom_remote", url)
repo = clone_repository(url, repo_path, repository=create_repository, remote=create_remote)
self.assertFalse(repo.is_empty)
self.assertTrue('refs/remotes/custom_remote/master' in repo.listall_references())
self.assertIsNotNone(repo.remotes["custom_remote"])
def test_clone_with_credentials(self):
repo = clone_repository(
"https://bitbucket.org/libgit2/testgitrepository.git",
self._temp_dir, callbacks=pygit2.RemoteCallbacks(credentials=pygit2.UserPass("libgit2", "libgit2")))
self.assertFalse(repo.is_empty)
def test_clone_with_checkout_branch(self):
# create a test case which isolates the remote
test_repo = clone_repository('./test/data/testrepo.git',
os.path.join(self._temp_dir, 'testrepo-orig.git'),
bare=True)
test_repo.create_branch('test', test_repo[test_repo.head.target])
repo = clone_repository(test_repo.path,
os.path.join(self._temp_dir, 'testrepo.git'),
checkout_branch='test', bare=True)
self.assertEqual(repo.lookup_reference('HEAD').target, 'refs/heads/test')
# FIXME The tests below are commented because they are broken:
#
# - test_clone_push_url: Passes, but does nothing useful.
#
# - test_clone_fetch_spec: Segfaults because of a bug in libgit2 0.19,
# this has been fixed already, so wait for 0.20
#
# - test_clone_push_spec: Passes, but does nothing useful.
#
# def test_clone_push_url(self):
# repo_path = "./test/data/testrepo.git/"
# repo = clone_repository(
# repo_path, self._temp_dir, push_url="custom_push_url"
# )
# self.assertFalse(repo.is_empty)
# # FIXME: When pygit2 supports retrieving the pushurl parameter,
# # enable this test
# # self.assertEqual(repo.remotes[0].pushurl, "custom_push_url")
# def test_clone_fetch_spec(self):
# repo_path = "./test/data/testrepo.git/"
# repo = clone_repository(repo_path, self._temp_dir,
# fetch_spec="refs/heads/test")
# self.assertFalse(repo.is_empty)
# # FIXME: When pygit2 retrieve the fetchspec we passed to git clone.
# # fetchspec seems to be going through, but the Repository class is
# # not getting it.
# # self.assertEqual(repo.remotes[0].fetchspec, "refs/heads/test")
# def test_clone_push_spec(self):
# repo_path = "./test/data/testrepo.git/"
# repo = clone_repository(repo_path, self._temp_dir,
# push_spec="refs/heads/test")
# self.assertFalse(repo.is_empty)
# # FIXME: When pygit2 supports retrieving the pushspec parameter,
# # enable this test
# # not sure how to test this either... couldn't find pushspec
# # self.assertEqual(repo.remotes[0].fetchspec, "refs/heads/test")
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
joericearchitect/site-joe-rice-architect | devops/deployment/ansible/ec2-inventory/ec2.py | 12 | 63782 | #!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
the AWS_PROFILE variable:
AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_block_devices
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
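# A quick invocation sketch (values are hypothetical; the flags themselves are
# the ones defined in parse_cli_args() below):
#
#   ./ec2.py --list                  # full inventory as JSON
#   ./ec2.py --host ec2-12-34-56-78.compute-1.amazonaws.com   # hostvars for one instance
#   ./ec2.py --refresh-cache         # bypass the local cache and re-query AWS
#   ./ec2.py --boto-profile prod --list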
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import elasticache
from boto import route53
import six
from ansible.module_utils import ec2 as ec2_utils
HAS_BOTO3 = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
pass
from six.moves import configparser
from collections import defaultdict
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
# Index of hostname (address) to instance ID
self.index = {}
# Boto profile to use (if any)
self.boto_profile = None
# AWS credentials.
self.credentials = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Make sure that profile_name is not passed at all if not set
# as pre 2.24 boto will fall over otherwise
if self.boto_profile:
if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
self.fail_with_error("boto version must be >= 2.24 to use profile")
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
config.read(ec2_ini_path)
# is eucalyptus?
self.eucalyptus_host = None
self.eucalyptus = False
if config.has_option('ec2', 'eucalyptus'):
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
                self.regions.append(boto.connect_euca(host=self.eucalyptus_host, **self.credentials).region.name)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name)
else:
self.regions = configRegions.split(",")
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
if config.has_option('ec2', 'hostname_variable'):
self.hostname_variable = config.get('ec2', 'hostname_variable')
else:
self.hostname_variable = None
if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'):
self.destination_format = config.get('ec2', 'destination_format')
self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
else:
self.destination_format = None
self.destination_format_tags = None
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'):
self.route53_excluded_zones.extend(
                config.get('ec2', 'route53_excluded_zones').split(','))
# Include RDS instances?
self.rds_enabled = True
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
# Include RDS cluster instances?
if config.has_option('ec2', 'include_rds_clusters'):
self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
else:
self.include_rds_clusters = False
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
# Return all EC2 instances?
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
# Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option.
ec2_valid_instance_states = [
'pending',
'running',
'shutting-down',
'terminated',
'stopping',
'stopped'
]
self.ec2_instance_states = []
if self.all_instances:
self.ec2_instance_states = ec2_valid_instance_states
elif config.has_option('ec2', 'instance_states'):
for instance_state in config.get('ec2', 'instance_states').split(','):
instance_state = instance_state.strip()
if instance_state not in ec2_valid_instance_states:
continue
self.ec2_instance_states.append(instance_state)
else:
self.ec2_instance_states = ['running']
# Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
# Return all ElastiCache replication groups? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
else:
self.all_elasticache_replication_groups = False
# Return all ElastiCache clusters? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
else:
self.all_elasticache_clusters = False
# Return all ElastiCache nodes? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
else:
self.all_elasticache_nodes = False
# boto configuration profile (prefer CLI argument)
self.boto_profile = self.args.boto_profile
if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
self.boto_profile = config.get('ec2', 'boto_profile')
# AWS credentials (prefer environment variables)
if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
os.environ.get('AWS_PROFILE')):
if config.has_option('credentials', 'aws_access_key_id'):
aws_access_key_id = config.get('credentials', 'aws_access_key_id')
else:
aws_access_key_id = None
if config.has_option('credentials', 'aws_secret_access_key'):
aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
else:
aws_secret_access_key = None
if config.has_option('credentials', 'aws_security_token'):
aws_security_token = config.get('credentials', 'aws_security_token')
else:
aws_security_token = None
if aws_access_key_id:
self.credentials = {
'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key
}
if aws_security_token:
self.credentials['security_token'] = aws_security_token
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_name = 'ansible-ec2'
aws_profile = lambda: (self.boto_profile or
os.environ.get('AWS_PROFILE') or
os.environ.get('AWS_ACCESS_KEY_ID') or
self.credentials.get('aws_access_key_id', None))
if aws_profile():
cache_name = '%s-%s' % (cache_name, aws_profile())
self.cache_path_cache = cache_dir + "/%s.cache" % cache_name
self.cache_path_index = cache_dir + "/%s.index" % cache_name
self.cache_max_age = config.getint('ec2', 'cache_max_age')
if config.has_option('ec2', 'expand_csv_tags'):
self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
else:
self.expand_csv_tags = False
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
else:
self.nested_groups = False
# Replace dash or not in group names
if config.has_option('ec2', 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
# Configure which groups should be created.
group_by_options = [
'group_by_instance_id',
'group_by_region',
'group_by_availability_zone',
'group_by_ami_id',
'group_by_instance_type',
'group_by_key_pair',
'group_by_vpc_id',
'group_by_security_group',
'group_by_tag_keys',
'group_by_tag_none',
'group_by_route53_names',
'group_by_rds_engine',
'group_by_rds_parameter_group',
'group_by_elasticache_engine',
'group_by_elasticache_cluster',
'group_by_elasticache_parameter_group',
'group_by_elasticache_replication_group',
]
for option in group_by_options:
if config.has_option('ec2', option):
setattr(self, option, config.getboolean('ec2', option))
else:
setattr(self, option, True)
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get('ec2', 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
            pattern_exclude = config.get('ec2', 'pattern_exclude')
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = defaultdict(list)
if config.has_option('ec2', 'instance_filters'):
filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f]
for instance_filter in filters:
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
if not filter_key:
continue
self.ec2_instance_filters[filter_key].append(filter_value)
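    # For reference, an illustrative ec2.ini fragment exercising the options read
    # above (values are placeholders, not shipped defaults):
    #
    #   [ec2]
    #   regions = us-east-1,us-west-2
    #   regions_exclude = us-gov-west-1,cn-north-1
    #   destination_variable = public_dns_name
    #   vpc_destination_variable = ip_address
    #   route53 = False
    #   all_instances = False
    #   instance_filters = tag:Environment=production
    #   cache_path = ~/.ansible/tmp
    #   cache_max_age = 300
    #   nested_groups = False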
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
if self.route53_enabled:
self.get_route53_records()
for region in self.regions:
self.get_instances_by_region(region)
if self.rds_enabled:
self.get_rds_instances_by_region(region)
if self.elasticache_enabled:
self.get_elasticache_clusters_by_region(region)
self.get_elasticache_replication_groups_by_region(region)
if self.include_rds_clusters:
self.include_rds_clusters_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def connect(self, region):
''' create connection to api server'''
if self.eucalyptus:
conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials)
conn.APIVersion = '2010-08-31'
else:
conn = self.connect_to_aws(ec2, region)
return conn
def boto_fix_security_token_in_profile(self, connect_args):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + self.boto_profile
if boto.config.has_option(profile, 'aws_security_token'):
connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
return connect_args
def connect_to_aws(self, module, region):
connect_args = self.credentials
# only pass the profile name if it's set (as it is not supported by older boto versions)
if self.boto_profile:
connect_args['profile_name'] = self.boto_profile
self.boto_fix_security_token_in_profile(connect_args)
conn = module.connect_to_region(region, **connect_args)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
return conn
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to the list of instances in a particular
region '''
try:
conn = self.connect(region)
reservations = []
if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.items():
reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
else:
reservations = conn.get_all_instances()
# Pull the tags back in a second step
# AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
# reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
instance_ids = []
for reservation in reservations:
instance_ids.extend([instance.id for instance in reservation.instances])
max_filter_value = 199
tags = []
for i in range(0, len(instance_ids), max_filter_value):
tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]}))
tags_by_instance_id = defaultdict(dict)
for tag in tags:
tags_by_instance_id[tag.res_id][tag.name] = tag.value
for reservation in reservations:
for instance in reservation.instances:
instance.tags = tags_by_instance_id[instance.id]
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
else:
backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
error = "Error connecting to %s backend.\n%s" % (backend, e.message)
self.fail_with_error(error, 'getting EC2 instances')
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular
region '''
try:
conn = self.connect_to_aws(rds, region)
if conn:
marker = None
while True:
instances = conn.get_all_dbinstances(marker=marker)
marker = instances.marker
for instance in instances:
self.add_rds_instance(instance, region)
if not marker:
break
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
def include_rds_clusters_by_region(self, region):
if not HAS_BOTO3:
self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
"getting RDS clusters")
client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
marker, clusters = '', []
while marker is not None:
resp = client.describe_db_clusters(Marker=marker)
clusters.extend(resp["DBClusters"])
marker = resp.get('Marker', None)
account_id = boto.connect_iam().get_user().arn.split(':')[4]
c_dict = {}
for c in clusters:
# remove these datetime objects as there is no serialisation to json
# currently in place and we don't need the data yet
if 'EarliestRestorableTime' in c:
del c['EarliestRestorableTime']
if 'LatestRestorableTime' in c:
del c['LatestRestorableTime']
if self.ec2_instance_filters == {}:
matches_filter = True
else:
matches_filter = False
try:
# arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
tags = client.list_tags_for_resource(
ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
c['Tags'] = tags['TagList']
if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.items():
# get AWS tag key e.g. tag:env will be 'env'
tag_name = filter_key.split(":", 1)[1]
# Filter values is a list (if you put multiple values for the same tag name)
matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
if matches_filter:
# it matches a filter, so stop looking for further matches
break
except Exception as e:
if e.message.find('DBInstanceNotFound') >= 0:
                    # AWS RDS bug (2016-01-06): deletion does not fully complete, leaving an 'empty' cluster.
# Ignore errors when trying to find tags for these
pass
# ignore empty clusters caused by AWS bug
if len(c['DBClusterMembers']) == 0:
continue
elif matches_filter:
c_dict[c['DBClusterIdentifier']] = c
self.inventory['db_clusters'] = c_dict
def get_elasticache_clusters_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache clusters (with
nodes' info) in a particular region.'''
        # ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = self.connect_to_aws(elasticache, region)
if conn:
# show_cache_node_info = True
# because we also want nodes' information
response = conn.describe_cache_clusters(None, None, None, True)
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
            # Boto also doesn't provide wrapper classes for CacheClusters or
# CacheNodes. Because of that we can't make use of the get_list
# method in the AWSQueryConnection. Let's do the work manually
clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
except KeyError as e:
error = "ElastiCache query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for cluster in clusters:
self.add_elasticache_cluster(cluster, region)
def get_elasticache_replication_groups_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache replication groups
in a particular region.'''
        # ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = self.connect_to_aws(elasticache, region)
if conn:
response = conn.describe_replication_groups()
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
            # Boto also doesn't provide wrapper classes for ReplicationGroups
# Because of that we can't make use of the get_list method in the
# AWSQueryConnection. Let's do the work manually
replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
except KeyError as e:
error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for replication_group in replication_groups:
self.add_elasticache_replication_group(replication_group, region)
def get_auth_error_message(self):
''' create an informative error message if there is an issue authenticating'''
errors = ["Authentication error retrieving ec2 inventory."]
if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
else:
errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
if len(boto_config_found) > 0:
errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
else:
errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
return '\n'.join(errors)
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_instance(self, region, instance_id):
conn = self.connect(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
return instance
def add_instance(self, instance, region):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
# Only return instances with desired instance states
if instance.state not in self.ec2_instance_states:
return
# Select the best destination address
if self.destination_format and self.destination_format_tags:
dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ])
elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
else:
dest = getattr(instance, self.destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.destination_variable, None)
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Set the inventory name
hostname = None
if self.hostname_variable:
if self.hostname_variable.startswith('tag_'):
hostname = instance.tags.get(self.hostname_variable[4:], None)
else:
hostname = getattr(instance, self.hostname_variable)
# If we can't get a nice hostname, use the destination address
if not hostname:
hostname = dest
else:
hostname = self.to_safe(hostname).lower()
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(hostname):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(hostname):
return
# Add to index
self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.placement, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.placement)
self.push_group(self.inventory, 'zones', instance.placement)
# Inventory: Group by Amazon Machine Image (AMI) ID
if self.group_by_ami_id:
ami_id = self.to_safe(instance.image_id)
self.push(self.inventory, ami_id, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'images', ami_id)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_type)
self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by key pair
if self.group_by_key_pair and instance.key_name:
key_name = self.to_safe('key_' + instance.key_name)
self.push(self.inventory, key_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'keys', key_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
for group in instance.groups:
key = self.to_safe("security_group_" + group.name)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
if self.expand_csv_tags and v and ',' in v:
values = map(lambda x: x.strip(), v.split(','))
else:
values = [v]
for v in values:
if v:
key = self.to_safe("tag_" + k + "=" + v)
else:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
if v:
self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled and self.group_by_route53_names:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
self.push(self.inventory, name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'route53', name)
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
self.push(self.inventory, 'tag_none', hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all EC2 instances
self.push(self.inventory, 'ec2', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is
addressable '''
# Only want available instances unless all_rds_instances is True
if not self.all_rds_instances and instance.status != 'available':
return
# Select the best destination address
dest = instance.endpoint[0]
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Set the inventory name
hostname = None
if self.hostname_variable:
if self.hostname_variable.startswith('tag_'):
hostname = instance.tags.get(self.hostname_variable[4:], None)
else:
hostname = getattr(instance, self.hostname_variable)
# If we can't get a nice hostname, use the destination address
if not hostname:
hostname = dest
hostname = self.to_safe(hostname).lower()
# Add to index
self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.availability_zone, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.availability_zone)
self.push_group(self.inventory, 'zones', instance.availability_zone)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_class)
self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by engine
if self.group_by_rds_engine:
self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
# Inventory: Group by parameter group
if self.group_by_rds_parameter_group:
self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: all RDS instances
self.push(self.inventory, 'rds', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as
        its nodes are addressable '''
# Only want available clusters unless all_elasticache_clusters is True
if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
return
# Select the best destination address
if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
# Memcached cluster
dest = cluster['ConfigurationEndpoint']['Address']
is_redis = False
else:
            # Redis single-node cluster
# Because all Redis clusters are single nodes, we'll merge the
# info from the cluster with info about the node
dest = cluster['CacheNodes'][0]['Endpoint']['Address']
is_redis = True
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, cluster['CacheClusterId']]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[cluster['CacheClusterId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
# Inventory: Group by region
if self.group_by_region and not is_redis:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone and not is_redis:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type and not is_redis:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group and not is_redis:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine and not is_redis:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
# Inventory: Group by parameter group
if self.group_by_elasticache_parameter_group:
self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
# Inventory: Group by replication group
if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
host_info = self.get_host_info_dict_from_describe_dict(cluster)
self.inventory["_meta"]["hostvars"][dest] = host_info
# Add the nodes
for node in cluster['CacheNodes']:
self.add_elasticache_node(node, cluster, region)
def add_elasticache_node(self, node, cluster, region):
''' Adds an ElastiCache node to the inventory and index, as long as
it is addressable '''
# Only want available nodes unless all_elasticache_nodes is True
if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
return
# Select the best destination address
dest = node['Endpoint']['Address']
if not dest:
# Skip nodes we cannot address (e.g. private VPC subnet)
return
node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
# Add to index
self.index[dest] = [region, node_id]
# Inventory: Group by node ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[node_id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', node_id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
# Inventory: Group by parameter group (done at cluster level)
# Inventory: Group by replication group (done at cluster level)
# Inventory: Group by ElastiCache Cluster
if self.group_by_elasticache_cluster:
self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
# Global Tag: all ElastiCache nodes
self.push(self.inventory, 'elasticache_nodes', dest)
host_info = self.get_host_info_dict_from_describe_dict(node)
if dest in self.inventory["_meta"]["hostvars"]:
self.inventory["_meta"]["hostvars"][dest].update(host_info)
else:
self.inventory["_meta"]["hostvars"][dest] = host_info
def add_elasticache_replication_group(self, replication_group, region):
''' Adds an ElastiCache replication group to the inventory and index '''
# Only want available clusters unless all_elasticache_replication_groups is True
if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
return
# Select the best destination address (PrimaryEndpoint)
dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, replication_group['ReplicationGroupId']]
# Inventory: Group by ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[replication_group['ReplicationGroupId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone (doesn't apply to replication groups)
# Inventory: Group by node type (doesn't apply to replication groups)
# Inventory: Group by VPC (information not available in the current
        # AWS API version for replication groups)
# Inventory: Group by security group (doesn't apply to replication groups)
# Check this value in cluster level
# Inventory: Group by engine (replication groups are always Redis)
if self.group_by_elasticache_engine:
self.push(self.inventory, 'elasticache_redis', dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', 'redis')
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
host_info = self.get_host_info_dict_from_describe_dict(replication_group)
self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ]
name_list = set()
for attrib in instance_attributes:
try:
value = getattr(instance, attrib)
except AttributeError:
continue
if value in self.route53_records:
name_list.update(self.route53_records[value])
return list(name_list)
def get_host_info_dict_from_instance(self, instance):
instance_vars = {}
for key in vars(instance):
value = getattr(instance, key)
key = self.to_safe('ec2_' + key)
# Handle complex types
# state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
if key == 'ec2__state':
instance_vars['ec2_state'] = instance.state or ''
instance_vars['ec2_state_code'] = instance.state_code
elif key == 'ec2__previous_state':
instance_vars['ec2_previous_state'] = instance.previous_state or ''
instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif type(value) in [int, bool]:
instance_vars[key] = value
elif isinstance(value, six.string_types):
instance_vars[key] = value.strip()
elif type(value) == type(None):
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2__placement':
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
for k, v in value.items():
if self.expand_csv_tags and ',' in v:
v = list(map(lambda x: x.strip(), v.split(',')))
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
elif key == 'ec2_block_device_mapping':
instance_vars["ec2_block_devices"] = {}
for k, v in value.items():
instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id
else:
pass
# TODO Product codes if someone finds them useful
#print key
#print type(value)
#print value
return instance_vars
def get_host_info_dict_from_describe_dict(self, describe_dict):
''' Parses the dictionary returned by the API call into a flat list
of parameters. This method should be used only when 'describe' is
used directly because Boto doesn't provide specific classes. '''
# I really don't agree with prefixing everything with 'ec2'
# because EC2, RDS and ElastiCache are different services.
# I'm just following the pattern used until now to not break any
# compatibility.
host_info = {}
for key in describe_dict:
value = describe_dict[key]
key = self.to_safe('ec2_' + self.uncammelize(key))
# Handle complex types
# Target: Memcached Cache Clusters
if key == 'ec2_configuration_endpoint' and value:
host_info['ec2_configuration_endpoint_address'] = value['Address']
host_info['ec2_configuration_endpoint_port'] = value['Port']
# Target: Cache Nodes and Redis Cache Clusters (single node)
if key == 'ec2_endpoint' and value:
host_info['ec2_endpoint_address'] = value['Address']
host_info['ec2_endpoint_port'] = value['Port']
# Target: Redis Replication Groups
if key == 'ec2_node_groups' and value:
host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
replica_count = 0
for node in value[0]['NodeGroupMembers']:
if node['CurrentRole'] == 'primary':
host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
elif node['CurrentRole'] == 'replica':
host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
replica_count += 1
# Target: Redis Replication Groups
if key == 'ec2_member_clusters' and value:
host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
# Target: All Cache Clusters
elif key == 'ec2_cache_parameter_group':
host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
# Target: Almost everything
elif key == 'ec2_security_groups':
# Skip if SecurityGroups is None
# (it is possible to have the key defined but no value in it).
if value is not None:
sg_ids = []
for sg in value:
sg_ids.append(sg['SecurityGroupId'])
host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
# Target: Everything
# Preserve booleans and integers
elif type(value) in [int, bool]:
host_info[key] = value
# Target: Everything
# Sanitize string values
elif isinstance(value, six.string_types):
host_info[key] = value.strip()
# Target: Everything
# Replace None by an empty string
elif type(value) == type(None):
host_info[key] = ''
else:
# Remove non-processed complex types
pass
return host_info
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
        if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
            if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(region, instance_id) = self.index[self.args.host]
instance = self.get_instance(region, instance_id)
return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
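    # Rough shape of the inventory dict these two helpers build up when
    # nested_groups is enabled (hostnames and group names below are illustrative
    # values only):
    #
    #   {
    #       "us-east-1": {"hosts": ["web01"], "children": ["us-east-1a"]},
    #       "regions": {"children": ["us-east-1"]},
    #       "_meta": {"hostvars": {"web01": {"ec2_state": "running", ...}}}
    #   }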
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
        with open(self.cache_path_cache, 'r') as cache:
            json_inventory = cache.read()
        return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index '''
        with open(self.cache_path_index, 'r') as cache:
            json_index = cache.read()
        self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def uncammelize(self, key):
temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
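    # For example, the two substitutions above give:
    #   uncammelize("CacheClusterId") -> "cache_cluster_id"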
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
return re.sub(regex + "]", "_", word)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
Ec2Inventory()
| apache-2.0 |
techtonik/readthedocs.org | readthedocs/vcs_support/base.py | 34 | 4583 | import logging
import os
import shutil
import subprocess
from collections import namedtuple
from os.path import basename
log = logging.getLogger(__name__)
class VCSVersion(object):
"""
Represents a Version (tag or branch) in a VCS.
This class should only be instantiated in BaseVCS subclasses.
It can act as a context manager to temporarily switch to this tag (eg to
build docs for this tag).
"""
def __init__(self, repository, identifier, verbose_name):
self.repository = repository
self.identifier = identifier
self.verbose_name = verbose_name
def __repr__(self):
return "<VCSVersion: %s:%s" % (self.repository.repo_url,
self.verbose_name)
class VCSProject(namedtuple("VCSProject",
"name default_branch working_dir repo_url")):
"""Transient object to encapsulate a projects stuff"""
pass
class BaseCLI(object):
"""
Helper class for CLI-heavy classes.
"""
log_tmpl = u'VCS[{name}:{ident}]: {args}'
def __call__(self, *args):
        return self.run(*args)
def run(self, *args):
"""
        :param args: command and arguments as separate strings. See `subprocess` docs
"""
process = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.working_dir, shell=False,
env=self.env)
try:
log.info(self.log_tmpl.format(ident=basename(self.working_dir),
name=self.name,
args=' '.join(args)))
except UnicodeDecodeError:
# >:x
pass
stdout, stderr = process.communicate()
try:
log.info(self.log_tmpl.format(ident=basename(self.working_dir),
name=self.name,
args=stdout))
except UnicodeDecodeError:
# >:x
pass
return (process.returncode, stdout, stderr)
@property
def env(self):
return os.environ.copy()
class BaseVCS(BaseCLI):
"""
Base for VCS Classes.
Built on top of the BaseCLI.
"""
supports_tags = False # Whether this VCS supports tags or not.
supports_branches = False # Whether this VCS supports branches or not.
# =========================================================================
# General methods
# =========================================================================
def __init__(self, project, version, **kwargs):
self.default_branch = project.default_branch
self.name = project.name
self.repo_url = project.repo_url
self.working_dir = project.working_dir
def check_working_dir(self):
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
def make_clean_working_dir(self):
"Ensures that the working dir exists and is empty"
shutil.rmtree(self.working_dir, ignore_errors=True)
self.check_working_dir()
def update(self):
"""
If self.working_dir is already a valid local copy of the repository,
update the repository, else create a new local copy of the repository.
"""
self.check_working_dir()
# =========================================================================
# Tag / Branch related methods
# These methods only apply if supports_tags = True and/or
# support_branches = True
# =========================================================================
@property
def tags(self):
"""
Returns a list of VCSVersion objects. See VCSVersion for more
information.
"""
raise NotImplementedError
@property
def branches(self):
"""
Returns a list of VCSVersion objects. See VCSVersion for more
information.
"""
raise NotImplementedError
@property
def commit(self):
"""
Returns a string representing the current commit.
"""
raise NotImplementedError
def checkout(self, identifier=None):
"""
Set the state to the given identifier.
If identifier is None, checkout to the latest revision.
The type and format of identifier may change from VCS to VCS, so each
        backend is responsible for understanding its identifiers.
"""
self.check_working_dir()
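# A minimal backend sketch (illustrative only, not part of this module); it
# assumes a hypothetical `git` executable on PATH and uses only the hooks
# defined above:
#
#   class GitBackend(BaseVCS):
#       supports_branches = True
#
#       def update(self):
#           super(GitBackend, self).update()
#           self.run('git', 'clone', self.repo_url, '.')
#
#       def checkout(self, identifier=None):
#           super(GitBackend, self).checkout(identifier)
#           self.run('git', 'checkout', identifier or self.default_branch)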
| mit |
mybios/angle | src/tests/deqp_tests/generate_deqp_tests.py | 24 | 1253 | import os
import re
import sys
def ReadFileAsLines(filename):
"""Reads a file, removing blank lines and lines that start with #"""
file = open(filename, "r")
raw_lines = file.readlines()
file.close()
lines = []
for line in raw_lines:
line = line.strip()
if len(line) > 0 and not line.startswith("#"):
lines.append(line)
return lines
def GetCleanTestName(testName):
replacements = { "dEQP-": "", ".*": "", ".":"_", }
cleanName = testName
for replaceKey in replacements:
cleanName = cleanName.replace(replaceKey, replacements[replaceKey])
return cleanName
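# GetCleanTestName example (illustrative): "dEQP-GLES2.functional.fragment_ops.blend"
# becomes "GLES2_functional_fragment_ops_blend".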
def GenerateTests(outFile, testNames):
    ''' Writes a GTest-style test wrapper for each unique dEQP test name '''
    # Remove duplicate tests
    testNames = list(set(testNames))
outFile.write("#include \"deqp_tests.h\"\n\n")
for test in testNames:
outFile.write("TEST(deqp, " + GetCleanTestName(test) + ")\n")
outFile.write("{\n")
outFile.write(" RunDEQPTest(\"" + test + "\", GetCurrentConfig());\n")
outFile.write("}\n\n")
def main(argv):
tests = ReadFileAsLines(argv[0])
output = open(argv[1], 'wb')
GenerateTests(output, tests)
output.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
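# Typical invocation (illustrative; the file names are hypothetical):
#   python generate_deqp_tests.py deqp_test_list.txt generated_deqp_tests.cpp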
| bsd-3-clause |
pepeportela/edx-platform | lms/djangoapps/verify_student/tests/test_fake_software_secure.py | 10 | 2751 | """
Tests for the fake software secure response.
"""
from django.test import TestCase
from mock import patch
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
from student.tests.factories import UserFactory
from util.testing import UrlResetMixin
class SoftwareSecureFakeViewTest(UrlResetMixin, TestCase):
"""
Base class to test the fake software secure view.
"""
URLCONF_MODULES = ['verify_student.urls']
def setUp(self, **kwargs):
enable_software_secure_fake = kwargs.get('enable_software_secure_fake', False)
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_SOFTWARE_SECURE_FAKE': enable_software_secure_fake}):
super(SoftwareSecureFakeViewTest, self).setUp()
self.user = UserFactory.create(username="test", password="test")
self.attempt = SoftwareSecurePhotoVerification.objects.create(user=self.user)
self.client.login(username="test", password="test")
class SoftwareSecureFakeViewDisabledTest(SoftwareSecureFakeViewTest):
"""
Test the fake software secure response when feature flag
'ENABLE_SOFTWARE_SECURE_FAKE' is not enabled.
"""
def setUp(self):
super(SoftwareSecureFakeViewDisabledTest, self).setUp(enable_software_secure_fake=False)
def test_get_method_without_enable_feature_flag(self):
"""
Test that the user gets 404 response if the feature flag
'ENABLE_SOFTWARE_SECURE_FAKE' is not enabled.
"""
response = self.client.get(
'/verify_student/software-secure-fake-response'
)
self.assertEqual(response.status_code, 404)
class SoftwareSecureFakeViewEnabledTest(SoftwareSecureFakeViewTest):
"""
Test the fake software secure response when feature flag
'ENABLE_SOFTWARE_SECURE_FAKE' is enabled.
"""
def setUp(self):
super(SoftwareSecureFakeViewEnabledTest, self).setUp(enable_software_secure_fake=True)
def test_get_method_without_logged_in_user(self):
"""
Test that the user gets 302 response if that user is not logged in.
"""
self.client.logout()
response = self.client.get(
'/verify_student/software-secure-fake-response'
)
self.assertEqual(response.status_code, 302)
def test_get_method(self):
"""
Test that GET method of fake software secure view uses the most recent
attempt for the logged-in user.
"""
response = self.client.get(
'/verify_student/software-secure-fake-response'
)
self.assertEqual(response.status_code, 200)
self.assertIn('EdX-ID', response.content)
self.assertIn('results_callback', response.content)
| agpl-3.0 |
SomethingExplosive/android_external_chromium_org | third_party/tlslite/tlslite/Checker.py | 359 | 6301 | """Class for post-handshake certificate checking."""
from utils.cryptomath import hashAndBase64
from X509 import X509
from X509CertChain import X509CertChain
from errors import *
class Checker:
"""This class is passed to a handshake function to check the other
party's certificate chain.
If a handshake function completes successfully, but the Checker
judges the other party's certificate chain to be missing or
inadequate, a subclass of
L{tlslite.errors.TLSAuthenticationError} will be raised.
Currently, the Checker can check either an X.509 or a cryptoID
chain (for the latter, cryptoIDlib must be installed).
"""
def __init__(self, cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
checkResumedSession=False):
"""Create a new Checker instance.
You must pass in one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
@type cryptoID: str
@param cryptoID: A cryptoID which the other party's certificate
chain must match. The cryptoIDlib module must be installed.
Mutually exclusive with all of the 'x509...' arguments.
@type protocol: str
@param protocol: A cryptoID protocol URI which the other
party's certificate chain must match. Requires the 'cryptoID'
argument.
@type x509Fingerprint: str
@param x509Fingerprint: A hex-encoded X.509 end-entity
fingerprint which the other party's end-entity certificate must
match. Mutually exclusive with the 'cryptoID' and
'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed. Mutually exclusive with the 'cryptoID' and
'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type checkResumedSession: bool
@param checkResumedSession: If resumed sessions should be
checked. This defaults to False, on the theory that if the
session was checked once, we don't need to bother
re-checking it.
"""
if cryptoID and (x509Fingerprint or x509TrustList):
raise ValueError()
if x509Fingerprint and x509TrustList:
raise ValueError()
if x509CommonName and not x509TrustList:
raise ValueError()
if protocol and not cryptoID:
raise ValueError()
if cryptoID:
import cryptoIDlib #So we raise an error here
if x509TrustList:
import cryptlib_py #So we raise an error here
self.cryptoID = cryptoID
self.protocol = protocol
self.x509Fingerprint = x509Fingerprint
self.x509TrustList = x509TrustList
self.x509CommonName = x509CommonName
self.checkResumedSession = checkResumedSession
def __call__(self, connection):
"""Check a TLSConnection.
When a Checker is passed to a handshake function, this will
be called at the end of the function.
@type connection: L{tlslite.TLSConnection.TLSConnection}
@param connection: The TLSConnection to examine.
@raise tlslite.errors.TLSAuthenticationError: If the other
party's certificate chain is missing or bad.
"""
if not self.checkResumedSession and connection.resumed:
return
if self.cryptoID or self.x509Fingerprint or self.x509TrustList:
if connection._client:
chain = connection.session.serverCertChain
else:
chain = connection.session.clientCertChain
if self.x509Fingerprint or self.x509TrustList:
if isinstance(chain, X509CertChain):
if self.x509Fingerprint:
if chain.getFingerprint() != self.x509Fingerprint:
raise TLSFingerprintError(\
"X.509 fingerprint mismatch: %s, %s" % \
(chain.getFingerprint(), self.x509Fingerprint))
else: #self.x509TrustList
if not chain.validate(self.x509TrustList):
raise TLSValidationError("X.509 validation failure")
if self.x509CommonName and \
(chain.getCommonName() != self.x509CommonName):
raise TLSAuthorizationError(\
"X.509 Common Name mismatch: %s, %s" % \
(chain.getCommonName(), self.x509CommonName))
elif chain:
raise TLSAuthenticationTypeError()
else:
raise TLSNoAuthenticationError()
elif self.cryptoID:
import cryptoIDlib.CertChain
if isinstance(chain, cryptoIDlib.CertChain.CertChain):
if chain.cryptoID != self.cryptoID:
raise TLSFingerprintError(\
"cryptoID mismatch: %s, %s" % \
(chain.cryptoID, self.cryptoID))
if self.protocol:
if not chain.checkProtocol(self.protocol):
raise TLSAuthorizationError(\
"cryptoID protocol mismatch")
if not chain.validate():
raise TLSValidationError("cryptoID validation failure")
elif chain:
raise TLSAuthenticationTypeError()
else:
raise TLSNoAuthenticationError()
| bsd-3-clause |
Oweoqi/metagoofil | hachoir_parser/container/riff.py | 86 | 16938 | # -*- coding: UTF-8 -*-
"""
RIFF parser, able to parse:
* AVI video container
* WAV audio container
* CDA file
Documents:
- libavformat source code from ffmpeg library
http://ffmpeg.mplayerhq.hu/
- Video for Windows Programmer's Guide
http://www.opennet.ru/docs/formats/avi.txt
- What is an animated cursor?
http://www.gdgsoft.com/anituner/help/aniformat.htm
Authors:
* Aurélien Jacobs
* Mickaël KENIKSSI
* Victor Stinner
Changelog:
* 2007-03-30: support ACON (animated icons)
* 2006-08-08: merge AVI, WAV and CDA parsers into RIFF parser
* 2006-08-03: creation of CDA parser by Mickaël KENIKSSI
* 2005-06-21: creation of WAV parser by Victor Stinner
* 2005-06-08: creation of AVI parser by Victor Stinner and Aurélien Jacobs
Thanks to:
* Wojtek Kaniewski (wojtekka AT logonet.com.pl) for its CDA file
format information
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32, Enum,
Bit, NullBits, NullBytes,
RawBytes, String, PaddingBytes,
SubFile)
from hachoir_core.tools import alignValue, humanDuration
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_core.text_handler import filesizeHandler, textHandler
from hachoir_parser.video.fourcc import audio_codec_name, video_fourcc_name
from hachoir_parser.image.ico import IcoFile
from datetime import timedelta
def parseText(self):
yield String(self, "text", self["size"].value,
strip=" \0", truncate="\0",
charset="ISO-8859-1")
def parseRawFormat(self, size):
yield RawBytes(self, "raw_format", size)
def parseVideoFormat(self, size):
yield UInt32(self, "video_size", "Video format: Size")
yield UInt32(self, "width", "Video format: Width")
yield UInt32(self, "height", "Video format: Height")
yield UInt16(self, "panes", "Video format: Panes")
yield UInt16(self, "depth", "Video format: Depth")
yield UInt32(self, "tag1", "Video format: Tag1")
yield UInt32(self, "img_size", "Video format: Image size")
yield UInt32(self, "xpels_meter", "Video format: XPelsPerMeter")
yield UInt32(self, "ypels_meter", "Video format: YPelsPerMeter")
yield UInt32(self, "clr_used", "Video format: ClrUsed")
yield UInt32(self, "clr_important", "Video format: ClrImportant")
def parseAudioFormat(self, size):
yield Enum(UInt16(self, "codec", "Audio format: Codec id"), audio_codec_name)
yield UInt16(self, "channel", "Audio format: Channels")
yield UInt32(self, "sample_rate", "Audio format: Sample rate")
yield UInt32(self, "bit_rate", "Audio format: Bit rate")
yield UInt16(self, "block_align", "Audio format: Block align")
if size >= 16:
yield UInt16(self, "bits_per_sample", "Audio format: Bits per sample")
if size >= 18:
yield UInt16(self, "ext_size", "Audio format: Size of extra information")
if size >= 28: # and self["a_channel"].value > 2
yield UInt16(self, "reserved", "Audio format: ")
yield UInt32(self, "channel_mask", "Audio format: channels placement bitmask")
yield UInt32(self, "subformat", "Audio format: Subformat id")
def parseAVIStreamFormat(self):
size = self["size"].value
strtype = self["../stream_hdr/stream_type"].value
TYPE_HANDLER = {
"vids": (parseVideoFormat, 40),
"auds": (parseAudioFormat, 16)
}
handler = parseRawFormat
if strtype in TYPE_HANDLER:
info = TYPE_HANDLER[strtype]
if info[1] <= size:
handler = info[0]
for field in handler(self, size):
yield field
def parseAVIStreamHeader(self):
if self["size"].value != 56:
raise ParserError("Invalid stream header size")
yield String(self, "stream_type", 4, "Stream type four character code", charset="ASCII")
field = String(self, "fourcc", 4, "Stream four character code", strip=" \0", charset="ASCII")
if self["stream_type"].value == "vids":
yield Enum(field, video_fourcc_name, lambda text: text.upper())
else:
yield field
yield UInt32(self, "flags", "Stream flags")
yield UInt16(self, "priority", "Stream priority")
yield String(self, "language", 2, "Stream language", charset="ASCII", strip="\0")
yield UInt32(self, "init_frames", "InitialFrames")
yield UInt32(self, "scale", "Time scale")
yield UInt32(self, "rate", "Divide by scale to give frame rate")
yield UInt32(self, "start", "Stream start time (unit: rate/scale)")
yield UInt32(self, "length", "Stream length (unit: rate/scale)")
yield UInt32(self, "buf_size", "Suggested buffer size")
yield UInt32(self, "quality", "Stream quality")
yield UInt32(self, "sample_size", "Size of samples")
yield UInt16(self, "left", "Destination rectangle (left)")
yield UInt16(self, "top", "Destination rectangle (top)")
yield UInt16(self, "right", "Destination rectangle (right)")
yield UInt16(self, "bottom", "Destination rectangle (bottom)")
class RedBook(FieldSet):
"""
RedBook offset parser, used in CD audio (.cda) file
"""
def createFields(self):
yield UInt8(self, "frame")
yield UInt8(self, "second")
yield UInt8(self, "minute")
yield PaddingBytes(self, "notused", 1)
def formatSerialNumber(field):
"""
    Format a disc serial number.
Eg. 0x00085C48 => "0008-5C48"
"""
sn = field.value
return "%04X-%04X" % (sn >> 16, sn & 0xFFFF)
def parseCDDA(self):
"""
HSG address format: number of 1/75 second
HSG offset = (minute*60 + second)*75 + frame + 150 (from RB offset)
HSG length = (minute*60 + second)*75 + frame (from RB length)
"""
yield UInt16(self, "cda_version", "CD file version (currently 1)")
yield UInt16(self, "track_no", "Number of track")
yield textHandler(UInt32(self, "disc_serial", "Disc serial number"),
formatSerialNumber)
yield UInt32(self, "hsg_offset", "Track offset (HSG format)")
yield UInt32(self, "hsg_length", "Track length (HSG format)")
yield RedBook(self, "rb_offset", "Track offset (Red-book format)")
yield RedBook(self, "rb_length", "Track length (Red-book format)")
def parseWAVFormat(self):
size = self["size"].value
if size not in (16, 18):
self.warning("Format with size of %s bytes is not supported!" % size)
yield Enum(UInt16(self, "codec", "Audio codec"), audio_codec_name)
yield UInt16(self, "nb_channel", "Number of audio channel")
yield UInt32(self, "sample_per_sec", "Sample per second")
yield UInt32(self, "byte_per_sec", "Average byte per second")
yield UInt16(self, "block_align", "Block align")
yield UInt16(self, "bit_per_sample", "Bits per sample")
def parseWAVFact(self):
yield UInt32(self, "nb_sample", "Number of samples in audio stream")
def parseAviHeader(self):
yield UInt32(self, "microsec_per_frame", "Microsecond per frame")
yield UInt32(self, "max_byte_per_sec", "Maximum byte per second")
yield NullBytes(self, "reserved", 4)
# Flags
yield NullBits(self, "reserved[]", 4)
yield Bit(self, "has_index")
yield Bit(self, "must_use_index")
yield NullBits(self, "reserved[]", 2)
yield Bit(self, "is_interleaved")
yield NullBits(self, "reserved[]", 2)
yield Bit(self, "trust_cktype")
yield NullBits(self, "reserved[]", 4)
yield Bit(self, "was_capture_file")
yield Bit(self, "is_copyrighted")
yield NullBits(self, "reserved[]", 14)
yield UInt32(self, "total_frame", "Total number of frames in the video")
yield UInt32(self, "init_frame", "Initial frame (used in interleaved video)")
yield UInt32(self, "nb_stream", "Number of streams")
yield UInt32(self, "sug_buf_size", "Suggested buffer size")
yield UInt32(self, "width", "Width in pixel")
yield UInt32(self, "height", "Height in pixel")
yield UInt32(self, "scale")
yield UInt32(self, "rate")
yield UInt32(self, "start")
yield UInt32(self, "length")
def parseODML(self):
yield UInt32(self, "total_frame", "Real number of frame of OpenDML video")
padding = self["size"].value - 4
if 0 < padding:
yield NullBytes(self, "padding[]", padding)
class AVIIndexEntry(FieldSet):
size = 16*8
def createFields(self):
yield String(self, "tag", 4, "Tag", charset="ASCII")
yield UInt32(self, "flags")
yield UInt32(self, "start", "Offset from start of movie data")
yield UInt32(self, "length")
def parseIndex(self):
while not self.eof:
yield AVIIndexEntry(self, "index[]")
class Chunk(FieldSet):
TAG_INFO = {
        # This dictionary is edited by RiffFile.validate()
"LIST": ("list[]", None, "Sub-field list"),
"JUNK": ("junk[]", None, "Junk (padding)"),
# Metadata
"INAM": ("title", parseText, "Document title"),
"IART": ("artist", parseText, "Artist"),
"ICMT": ("comment", parseText, "Comment"),
"ICOP": ("copyright", parseText, "Copyright"),
"IENG": ("author", parseText, "Author"),
"ICRD": ("creation_date", parseText, "Creation date"),
"ISFT": ("producer", parseText, "Producer"),
"IDIT": ("datetime", parseText, "Date time"),
        # TODO: see below
# "strn": Stream description
# TWOCC code, movie/field[]/tag.value[2:4]:
# "db": "Uncompressed video frame",
# "dc": "Compressed video frame",
# "wb": "Audio data",
# "pc": "Palette change"
}
subtag_info = {
"INFO": ("info", "File informations"),
"hdrl": ("headers", "Headers"),
"strl": ("stream[]", "Stream header list"),
"movi": ("movie", "Movie stream"),
"odml": ("odml", "ODML"),
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = (8 + alignValue(self["size"].value, 2)) * 8
tag = self["tag"].value
if tag in self.TAG_INFO:
self.tag_info = self.TAG_INFO[tag]
if tag == "LIST":
subtag = self["subtag"].value
if subtag in self.subtag_info:
info = self.subtag_info[subtag]
self.tag_info = (info[0], None, info[1])
self._name = self.tag_info[0]
self._description = self.tag_info[2]
else:
self.tag_info = ("field[]", None, None)
def createFields(self):
yield String(self, "tag", 4, "Tag", charset="ASCII")
yield filesizeHandler(UInt32(self, "size", "Size"))
if not self["size"].value:
return
if self["tag"].value == "LIST":
yield String(self, "subtag", 4, "Sub-tag", charset="ASCII")
handler = self.tag_info[1]
while 8 < (self.size - self.current_size)/8:
field = self.__class__(self, "field[]")
yield field
if (field.size/8) % 2 != 0:
yield UInt8(self, "padding[]", "Padding")
else:
handler = self.tag_info[1]
if handler:
for field in handler(self):
yield field
else:
yield RawBytes(self, "raw_content", self["size"].value)
padding = self.seekBit(self._size)
if padding:
yield padding
def createDescription(self):
tag = self["tag"].display
return u"Chunk (tag %s)" % tag
class ChunkAVI(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
"strh": ("stream_hdr", parseAVIStreamHeader, "Stream header"),
"strf": ("stream_fmt", parseAVIStreamFormat, "Stream format"),
"avih": ("avi_hdr", parseAviHeader, "AVI header"),
"idx1": ("index", parseIndex, "Stream index"),
"dmlh": ("odml_hdr", parseODML, "ODML header"),
})
class ChunkCDDA(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'fmt ': ("cdda", parseCDDA, "CD audio informations"),
})
class ChunkWAVE(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'fmt ': ("format", parseWAVFormat, "Audio format"),
'fact': ("nb_sample", parseWAVFact, "Number of samples"),
'data': ("audio_data", None, "Audio stream data"),
})
def parseAnimationHeader(self):
yield UInt32(self, "hdr_size", "Size of header (36 bytes)")
if self["hdr_size"].value != 36:
self.warning("Animation header with unknown size (%s)" % self["size"].value)
yield UInt32(self, "nb_frame", "Number of unique Icons in this cursor")
yield UInt32(self, "nb_step", "Number of Blits before the animation cycles")
yield UInt32(self, "cx")
yield UInt32(self, "cy")
yield UInt32(self, "bit_count")
yield UInt32(self, "planes")
yield UInt32(self, "jiffie_rate", "Default Jiffies (1/60th of a second) if rate chunk not present")
yield Bit(self, "is_icon")
yield NullBits(self, "padding", 31)
def parseAnimationSequence(self):
while not self.eof:
yield UInt32(self, "icon[]")
def formatJiffie(field):
sec = float(field.value) / 60
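    # e.g. (illustrative) a value of 90 jiffies -> 90/60 = 1.5 seconds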
return humanDuration(timedelta(seconds=sec))
def parseAnimationRate(self):
while not self.eof:
yield textHandler(UInt32(self, "rate[]"), formatJiffie)
def parseIcon(self):
yield SubFile(self, "icon_file", self["size"].value, parser_class=IcoFile)
class ChunkACON(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'anih': ("anim_hdr", parseAnimationHeader, "Animation header"),
'seq ': ("anim_seq", parseAnimationSequence, "Animation sequence"),
'rate': ("anim_rate", parseAnimationRate, "Animation sequence"),
'icon': ("icon[]", parseIcon, "Icon"),
})
class RiffFile(Parser):
PARSER_TAGS = {
"id": "riff",
"category": "container",
"file_ext": ("avi", "cda", "wav", "ani"),
"min_size": 16*8,
"mime": (u"video/x-msvideo", u"audio/x-wav", u"audio/x-cda"),
# FIXME: Use regex "RIFF.{4}(WAVE|CDDA|AVI )"
"magic": (
("AVI LIST", 8*8),
("WAVEfmt ", 8*8),
("CDDAfmt ", 8*8),
("ACONanih", 8*8),
),
"description": "Microsoft RIFF container"
}
VALID_TYPES = {
"WAVE": (ChunkWAVE, u"audio/x-wav", u"Microsoft WAVE audio", ".wav"),
"CDDA": (ChunkCDDA, u"audio/x-cda", u"Microsoft Windows audio CD file (cda)", ".cda"),
"AVI ": (ChunkAVI, u"video/x-msvideo", u"Microsoft AVI video", ".avi"),
"ACON": (ChunkACON, u"image/x-ani", u"Microsoft Windows animated cursor", ".ani"),
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != "RIFF":
return "Wrong signature"
if self["type"].value not in self.VALID_TYPES:
return "Unknown RIFF content type"
return True
def createFields(self):
yield String(self, "signature", 4, "AVI header (RIFF)", charset="ASCII")
yield filesizeHandler(UInt32(self, "filesize", "File size"))
yield String(self, "type", 4, "Content type (\"AVI \", \"WAVE\", ...)", charset="ASCII")
# Choose chunk type depending on file type
try:
chunk_cls = self.VALID_TYPES[self["type"].value][0]
except KeyError:
chunk_cls = Chunk
# Parse all chunks up to filesize
while self.current_size < self["filesize"].value*8+8:
yield chunk_cls(self, "chunk[]")
if not self.eof:
yield RawBytes(self, "padding[]", (self.size-self.current_size)/8)
def createMimeType(self):
try:
return self.VALID_TYPES[self["type"].value][1]
except KeyError:
return None
def createDescription(self):
tag = self["type"].value
if tag == "AVI ":
desc = u"Microsoft AVI video"
if "headers/avi_hdr" in self:
header = self["headers/avi_hdr"]
desc += ": %ux%u pixels" % (header["width"].value, header["height"].value)
microsec = header["microsec_per_frame"].value
if microsec:
desc += ", %.1f fps" % (1000000.0 / microsec)
if "total_frame" in header and header["total_frame"].value:
delta = timedelta(seconds=float(header["total_frame"].value) * microsec)
desc += ", " + humanDuration(delta)
return desc
else:
try:
return self.VALID_TYPES[tag][2]
except KeyError:
return u"Microsoft RIFF container"
def createContentSize(self):
size = (self["filesize"].value + 8) * 8
return min(size, self.stream.size)
def createFilenameSuffix(self):
try:
return self.VALID_TYPES[self["type"].value][3]
except KeyError:
return ".riff"
| gpl-2.0 |
ninapavlich/lesleyloraine | lesleyloraine/apps/email/migrations/0002_auto_20160205_2337.py | 3 | 1436 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('email', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='emailcategory',
name='admin_note',
field=models.TextField(help_text=b'Not publicly visible', null=True, verbose_name='admin note', blank=True),
),
migrations.AlterField(
model_name='emailcategorysubscriptionsettings',
name='admin_note',
field=models.TextField(help_text=b'Not publicly visible', null=True, verbose_name='admin note', blank=True),
),
migrations.AlterField(
model_name='emailreceipt',
name='admin_note',
field=models.TextField(help_text=b'Not publicly visible', null=True, verbose_name='admin note', blank=True),
),
migrations.AlterField(
model_name='emailtemplate',
name='admin_note',
field=models.TextField(help_text=b'Not publicly visible', null=True, verbose_name='admin note', blank=True),
),
migrations.AlterField(
model_name='usersubscriptionsettings',
name='admin_note',
field=models.TextField(help_text=b'Not publicly visible', null=True, verbose_name='admin note', blank=True),
),
]
| mit |
arifsetiawan/edx-platform | lms/djangoapps/courseware/features/conditional.py | 102 | 4723 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, steps
from nose.tools import assert_in, assert_true # pylint: disable=no-name-in-module
from common import i_am_registered_for_the_course, visit_scenario_item
from problems_setup import add_problem_to_course, answer_problem
@steps
class ConditionalSteps(object):
COURSE_NUM = 'test_course'
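    # With lettuce's @steps decorator, the raw-string docstring at the top of
    # each method below is the regular expression matched against scenario steps.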
def setup_conditional(self, step, condition_type, condition, cond_value):
r'that a course has a Conditional conditioned on (?P<condition_type>\w+) (?P<condition>\w+)=(?P<cond_value>\w+)$'
i_am_registered_for_the_course(step, self.COURSE_NUM)
world.scenario_dict['VERTICAL'] = world.ItemFactory(
parent_location=world.scenario_dict['SECTION'].location,
category='vertical',
display_name="Test Vertical",
)
world.scenario_dict['WRAPPER'] = world.ItemFactory(
parent_location=world.scenario_dict['VERTICAL'].location,
category='wrapper',
display_name="Test Poll Wrapper"
)
if condition_type == 'problem':
world.scenario_dict['CONDITION_SOURCE'] = add_problem_to_course(self.COURSE_NUM, 'string')
elif condition_type == 'poll':
world.scenario_dict['CONDITION_SOURCE'] = world.ItemFactory(
parent_location=world.scenario_dict['WRAPPER'].location,
category='poll_question',
display_name='Conditional Poll',
data={
'question': 'Is this a good poll?',
'answers': [
{'id': 'yes', 'text': 'Yes, of course'},
{'id': 'no', 'text': 'Of course not!'}
],
}
)
else:
raise Exception("Unknown condition type: {!r}".format(condition_type))
metadata = {
'xml_attributes': {
condition: cond_value
}
}
world.scenario_dict['CONDITIONAL'] = world.ItemFactory(
parent_location=world.scenario_dict['WRAPPER'].location,
category='conditional',
display_name="Test Conditional",
metadata=metadata,
sources_list=[world.scenario_dict['CONDITION_SOURCE'].location],
)
world.ItemFactory(
parent_location=world.scenario_dict['CONDITIONAL'].location,
category='html',
display_name='Conditional Contents',
            data='<html><div class="hidden-contents">Hidden Contents</div></html>'
)
def setup_problem_attempts(self, step, not_attempted=None):
r'that the conditioned problem has (?P<not_attempted>not )?been attempted$'
visit_scenario_item('CONDITION_SOURCE')
if not_attempted is None:
answer_problem(self.COURSE_NUM, 'string', True)
world.css_click("button.check")
def when_i_view_the_conditional(self, step):
r'I view the conditional$'
visit_scenario_item('CONDITIONAL')
world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Conditional]").data("initialized")')
def check_visibility(self, step, visible):
r'the conditional contents are (?P<visible>\w+)$'
world.wait_for_ajax_complete()
assert_in(visible, ('visible', 'hidden'))
if visible == 'visible':
world.wait_for_visible('.hidden-contents')
assert_true(world.css_visible('.hidden-contents'))
else:
assert_true(world.is_css_not_present('.hidden-contents'))
assert_true(
world.css_contains_text(
'.conditional-message',
'must be attempted before this will become visible.'
)
)
def answer_poll(self, step, answer):
r' I answer the conditioned poll "([^"]*)"$'
visit_scenario_item('CONDITION_SOURCE')
world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Poll]").data("initialized")')
world.wait_for_ajax_complete()
answer_text = [
poll_answer['text']
for poll_answer
in world.scenario_dict['CONDITION_SOURCE'].answers
if poll_answer['id'] == answer
][0]
text_selector = '.poll_answer .text'
poll_texts = world.retry_on_exception(
lambda: [elem.text for elem in world.css_find(text_selector)]
)
for idx, poll_text in enumerate(poll_texts):
if poll_text == answer_text:
world.css_click(text_selector, index=idx)
return
ConditionalSteps()
| agpl-3.0 |
lucywyman/slides | source/source/conf.py | 1 | 9886 | # -*- coding: utf-8 -*-
#
# slides documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 1 21:05:39 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'rst2pdf.pdfbuilder',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'slides'
copyright = u'2015, l'
author = u'l'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2015.07.01'
# The full version, including alpha/beta/rc tags.
release = '2015.07.01'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['./static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
#html_style = 'styles.css'
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'slidesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
#latex_documents = [
# (master_doc, 'slides.tex', u'slides Documentation',
# u'l', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
#man_pages = [
# (master_doc, 'slides', u'slides Documentation',
# [author], 1)
#]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
#texinfo_documents = [
# (master_doc, 'slides', u'slides Documentation',
# author, 'slides', 'One line description of project.',
# 'Miscellaneous'),
#]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Hieroglyph Slide Configuration ------------
extensions += [
'hieroglyph',
]
slide_title = 'slides'
slide_theme = 'single-level'
slide_levels = 3
# Place custom static assets in the static directory and uncomment
# the following lines to include them
slide_theme_options = {
# 'custom_css': 'custom.css',
# 'custom_js': 'custom.js',
}
# ----------------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| mit |
bw2/gemini | gemini/vep.py | 4 | 11612 | #!/usr/bin/env python
#############
# CSQ: Consequence|Codons|Amino_acids|Gene|hgnc|Feature|EXON|polyphen|sift|Protein_position|BIOTYPE
# missense_variant|gAg/gTg|E/V|ENSG00000188157||ENST00000379370|12/36|probably_damaging(0.932)|deleterious(0.02)|728/2045_protein_coding
# nc_transcript_variant|||ENSG00000116254|CHD5|ENST00000491020|5/6|||||
#############
from collections import defaultdict, namedtuple
import itertools
class EffectDetails(object):
def __init__(self, impact_string, severity, detail_string, counter, labels):
fields = self._prep_fields(detail_string, labels)
self.effect_severity = severity
self.effect_name = impact_string
self.anno_id = counter
fields.pop("consequence", None)
self.codon_change = fields.pop("codons", None)
self.aa_change = fields.pop("amino_acids", None)
self.ensembl_gene = fields.pop("gene", None)
self.hgnc = fields.pop("symbol", None)
self.gene = self.hgnc or self.ensembl_gene
self.transcript = fields.pop("feature", None)
self.exon = fields.pop("exon", None)
self.polyphen = fields.pop("polyphen", None)
self.sift = fields.pop("sift", None)
self.aa_length = fields.pop("protein_position", None)
self.biotype = fields.pop("biotype", None)
self.warnings = fields.pop("warning", None)
self.extra_fields = {"vep_%s" % k: v for k, v in fields.items()}
self.consequence = effect_dict[self.effect_name] if self.effect_severity is not None else self.effect_name
self.so = self.effect_name # VEP impacts are SO by default
# rules for being exonic.
# 1. the impact must be in the list of exonic impacts
        # 2. must be protein_coding
self.is_exonic = 0
if self.effect_name in exonic_impacts and \
self.biotype == "protein_coding":
self.is_exonic = 1
# rules for being loss-of-function (lof).
# must be protein_coding
# must be a coding variant with HIGH impact
if self.effect_severity == "HIGH" and self.biotype == "protein_coding":
self.is_lof = 1
else:
self.is_lof = 0
# Rules for being coding
# must be protein_coding
# Exonic but not UTR's
if self.is_exonic and not (self.effect_name == "5_prime_UTR_variant" or
self.effect_name == "3_prime_UTR_variant"):
self.is_coding = 1
else:
self.is_coding = 0
# parse Polyphen predictions
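        # e.g. "probably_damaging(0.932)" -> pred "probably_damaging", score "0.932"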
if self.polyphen is not None:
self.polyphen_b = self.polyphen.split("(")
self.polyphen_pred = self.polyphen_b[0]
self.polyphen2 = self.polyphen_b[1].split(")")
self.polyphen_score = self.polyphen2[0]
else:
self.polyphen_pred = None
self.polyphen_score = None
# parse SIFT predictions
if self.sift is not None:
self.sift_b = self.sift.split("(")
self.sift_pred = self.sift_b[0]
self.sift2 = self.sift_b[1].split(")")
self.sift_score = self.sift2[0]
else:
self.sift_pred = None
self.sift_score = None
def _prep_fields(self, detail_string, labels):
"""Prepare a dictionary mapping labels to provided fields in the consequence.
"""
out = {}
for key, val in itertools.izip_longest(labels, detail_string.split("|")):
if val and val.strip():
if key is None:
out["warnings"] = val.strip()
else:
out[key.strip().lower()] = val.strip()
return out
def __str__(self):
return "\t".join([self.consequence, self.effect_severity, str(self.codon_change),
str(self.aa_change), str(self.aa_length), str(self.biotype),
str(self.ensembl_gene), str(self.gene), str(self.transcript),
str(self.exon), str(self.is_exonic), str(self.anno_id), str(self.polyphen_pred),
str(self.polyphen_score), str(self.sift_pred), str(self.sift_score),
str(self.is_coding), str(self.is_lof), str(self.so)])
def __repr__(self):
return self.__str__()
exonic_impacts = ["stop_gained",
"stop_lost",
"frameshift_variant",
"initiator_codon_variant",
"inframe_deletion",
"inframe_insertion",
"missense_variant",
"incomplete_terminal_codon_variant",
"stop_retained_variant",
"synonymous_variant",
"coding_sequence_variant",
"5_prime_UTR_variant",
"3_prime_UTR_variant",
"transcript_ablation",
"transcript_amplification",
"feature_elongation",
"feature_truncation"]
effect_names = ["splice_acceptor_variant", "splice_donor_variant",
"stop_gained", "stop_lost",
"non_coding_exon_variant", "frameshift_variant",
"initiator_codon_variant", "inframe_deletion",
"inframe_insertion", "missense_variant",
"splice_region_variant", "incomplete_terminal_codon_variant",
"stop_retained_variant", "synonymous_variant",
"coding_sequence_variant", "mature_miRNA_variant",
"5_prime_UTR_variant", "3_prime_UTR_variant",
"intron_variant", "NMD_transcript_variant",
"nc_transcript_variant", "upstream_gene_variant",
"downstream_gene_variant", "regulatory_region_variant",
"TF_binding_site_variant", "intergenic_variant",
"regulatory_region_ablation", "regulatory_region_amplification",
"transcript_ablation", "transcript_amplification",
"TFBS_ablation", "TFBS_amplification",
"feature_elongation", "feature_truncation"]
effect_dict = defaultdict()
effect_dict = {
'splice_acceptor_variant': 'splice_acceptor', 'splice_donor_variant': 'splice_donor',
'stop_gained': 'stop_gain', 'stop_lost': 'stop_loss',
'non_coding_exon_variant': 'nc_exon', 'frameshift_variant': 'frame_shift',
'initiator_codon_variant': 'transcript_codon_change', 'inframe_deletion': 'inframe_codon_loss',
'inframe_insertion': 'inframe_codon_gain', 'missense_variant': 'non_syn_coding',
'splice_region_variant': 'splice_region', 'incomplete_terminal_codon_variant': 'incomplete_terminal_codon',
'stop_retained_variant': 'synonymous_stop', 'synonymous_variant': 'synonymous_coding',
'coding_sequence_variant': 'CDS', 'mature_miRNA_variant': 'mature_miRNA',
'5_prime_UTR_variant': 'UTR_5_prime', '3_prime_UTR_variant': 'UTR_3_prime',
'intron_variant': 'intron', 'NMD_transcript_variant': 'NMD_transcript',
'nc_transcript_variant': 'nc_transcript', 'upstream_gene_variant': 'upstream',
'downstream_gene_variant': 'downstream', 'regulatory_region_variant': 'regulatory_region',
'TF_binding_site_variant': 'TF_binding_site', 'intergenic_variant': 'intergenic',
'regulatory_region_ablation': 'regulatory_region_ablation', 'regulatory_region_amplification': 'regulatory_region_amplification',
'transcript_ablation': 'transcript_ablation', 'transcript_amplification': 'transcript_amplification',
'TFBS_ablation': 'TFBS_ablation', 'TFBS_amplification': 'TFBS_amplification',
'feature_elongation': 'feature_elongation', 'feature_truncation': 'feature_truncation'}
effect_desc = ["The variant hits the splice acceptor site (2 basepair region at 3' end of an intron)", "The variant hits the splice donor site (2 basepair region at 5'end of an intron)",
"Variant causes a STOP codon", "Variant causes stop codon to be mutated into a non-stop codon",
"Variant causes a change in the non coding exon sequence", "Insertion or deletion causes a frame shift in coding sequence",
"Variant causes atleast one base change in the first codon of a transcript", "An inframe non-syn variant that deletes bases from the coding sequence",
"An inframe non-syn variant that inserts bases in the coding sequence", "The variant causes a different amino acid in the coding sequence",
"Variant causes a change within the region of a splice site (1-3bps into an exon or 3-8bps into an intron)", "The variant hits the incomplete codon of a transcript whose end co-ordinate is not known",
"The variant causes stop codon to be mutated into another stop codon", "The variant causes no amino acid change in coding sequence",
"Variant hits coding sequence with indeterminate effect", "The variant hits a microRNA",
"Variant hits the 5 prime untranslated region", "Variant hits the 3 prime untranslated region",
"Variant hits an intron", "A variant hits a transcript that is predicted to undergo nonsense mediated decay",
"Variant hits a gene that does not code for a protein", "The variant hits upstream of a gene (5' of a gene)",
"The variant hits downstream of a gene (3' of a gene)", "Variant hits the regulatory region annotated by Ensembl(e.g promoter)",
"Variant falls in a transcription factor binding motif within an Ensembl regulatory region", "The variant is located in the intergenic region, between genes",
"SV causes ablation of a regulatory region", "SV results in an amplification of a regulatory region",
"SV causes an ablation/deletion of a transcript feature", "SV causes an amplification of a transcript feature",
"SV results in a deletion of the TFBS", "SV results in an amplification of a region containing TFBS",
"SV causes an extension of a genomic feature wrt reference", "SV causes a reduction of a genomic feature compared to reference"]
effect_priorities = ["HIGH", "HIGH",
"HIGH", "HIGH",
"LOW", "HIGH",
"HIGH", "MED",
"MED", "MED",
"MED", "LOW",
"LOW", "LOW",
"LOW", "MED",
"LOW", "LOW",
"LOW", "LOW",
"LOW", "LOW",
"LOW", "MED",
"MED", "LOW",
"MED", "MED",
"LOW", "LOW",
"MED", "MED",
"LOW", "LOW"]
effect_priority_codes = [1, 1,
1, 1,
3, 1,
1, 2,
2, 2,
2, 3,
3, 3,
3, 2,
3, 3,
3, 3,
3, 3,
3, 2,
2, 3,
2, 2,
3, 3,
2, 2,
3, 3]
effect_ids = range(1, 35)
effect_map = {}
EffectInfo = namedtuple(
'EffectInfo', ['id', 'priority', 'priority_code', 'desc'])
for i, effect_name in enumerate(effect_names):
info = EffectInfo(effect_ids[i], effect_priorities[i],
effect_priority_codes[i], effect_desc[i])
effect_map[effect_name] = info
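# Illustrative lookup: effect_map["stop_gained"] is
# EffectInfo(id=3, priority='HIGH', priority_code=1,
#            desc='Variant causes a STOP codon').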
| mit |
xianggong/m2c_unit_test | test/integer/mad_hi_ushortushortushort/compile.py | 1861 | 4430 | #!/usr/bin/python
import os
import subprocess
import re
def runCommand(command):
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.wait()
return iter(p.stdout.readline, b'')
def dumpRunCommand(command, dump_file_name, postfix):
dumpFile = open(dump_file_name + postfix, "w+")
dumpFile.write(command + "\n")
for line in runCommand(command.split()):
dumpFile.write(line)
def rmFile(file_name):
cmd = "rm -rf " + file_name
runCommand(cmd.split())
def rnm_ir(file_name):
    # Prefix all unnamed variables (%1, %2, ...) with 'tmp_'
ir_file_name = file_name + ".ll"
if os.path.isfile(ir_file_name):
        fo = open(ir_file_name, "r+")
lines = fo.readlines()
fo.seek(0)
fo.truncate()
for line in lines:
# Add entry block identifier
if "define" in line:
line += "entry:\n"
# Rename all unnamed variables
line = re.sub('\%([0-9]+)',
r'%tmp_\1',
line.rstrip())
# Also rename branch name
line = re.sub('(\;\ \<label\>\:)([0-9]+)',
r'tmp_\2:',
line.rstrip())
fo.write(line + '\n')
def gen_ir(file_name):
# Directories
root_dir = '../../../'
header_dir = root_dir + "inc/"
# Headers
header = " -I " + header_dir
header += " -include " + header_dir + "m2c_buildin_fix.h "
header += " -include " + header_dir + "clc/clc.h "
header += " -D cl_clang_storage_class_specifiers "
gen_ir = "clang -S -emit-llvm -O0 -target r600-- -mcpu=verde "
cmd_gen_ir = gen_ir + header + file_name + ".cl"
dumpRunCommand(cmd_gen_ir, file_name, ".clang.log")
def asm_ir(file_name):
if os.path.isfile(file_name + ".ll"):
# Command to assemble IR to bitcode
gen_bc = "llvm-as "
gen_bc_src = file_name + ".ll"
gen_bc_dst = file_name + ".bc"
cmd_gen_bc = gen_bc + gen_bc_src + " -o " + gen_bc_dst
runCommand(cmd_gen_bc.split())
def opt_bc(file_name):
if os.path.isfile(file_name + ".bc"):
        # Command to optimize bitcode
opt_bc = "opt --mem2reg "
opt_ir_src = file_name + ".bc"
opt_ir_dst = file_name + ".opt.bc"
cmd_opt_bc = opt_bc + opt_ir_src + " -o " + opt_ir_dst
runCommand(cmd_opt_bc.split())
def dis_bc(file_name):
if os.path.isfile(file_name + ".bc"):
# Command to disassemble bitcode
dis_bc = "llvm-dis "
dis_ir_src = file_name + ".opt.bc"
dis_ir_dst = file_name + ".opt.ll"
cmd_dis_bc = dis_bc + dis_ir_src + " -o " + dis_ir_dst
runCommand(cmd_dis_bc.split())
def m2c_gen(file_name):
if os.path.isfile(file_name + ".opt.bc"):
        # Command to generate Southern Islands assembly from bitcode
m2c_gen = "m2c --llvm2si "
m2c_gen_src = file_name + ".opt.bc"
cmd_m2c_gen = m2c_gen + m2c_gen_src
dumpRunCommand(cmd_m2c_gen, file_name, ".m2c.llvm2si.log")
# Remove file if size is 0
if os.path.isfile(file_name + ".opt.s"):
if os.path.getsize(file_name + ".opt.s") == 0:
rmFile(file_name + ".opt.s")
def m2c_bin(file_name):
if os.path.isfile(file_name + ".opt.s"):
        # Command to assemble Southern Islands assembly into a binary
m2c_bin = "m2c --si2bin "
m2c_bin_src = file_name + ".opt.s"
cmd_m2c_bin = m2c_bin + m2c_bin_src
dumpRunCommand(cmd_m2c_bin, file_name, ".m2c.si2bin.log")
def main():
# Commands
for file in os.listdir("./"):
if file.endswith(".cl"):
file_name = os.path.splitext(file)[0]
# Execute commands
gen_ir(file_name)
rnm_ir(file_name)
asm_ir(file_name)
opt_bc(file_name)
dis_bc(file_name)
m2c_gen(file_name)
m2c_bin(file_name)
if __name__ == "__main__":
main()
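# Typical use (illustrative): run from a directory of OpenCL kernels, e.g.
#   ./compile.py
# which processes every *.cl file found in the current directory.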
| gpl-2.0 |
MartijnBraam/CouchPotatoServer | libs/rtorrent/err.py | 182 | 1638 | # Copyright (c) 2013 Chris Lucas, <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from rtorrent.common import convert_version_tuple_to_str
class RTorrentVersionError(Exception):
def __init__(self, min_version, cur_version):
self.min_version = min_version
self.cur_version = cur_version
self.msg = "Minimum version required: {0}".format(
convert_version_tuple_to_str(min_version))
def __str__(self):
return(self.msg)
class MethodError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return(self.msg)
| gpl-3.0 |
Panos512/invenio | modules/miscutil/lib/upgrades/invenio_2014_11_04_format_recjson.py | 5 | 1452 | ## -*- mode: python; coding: utf-8; -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Add new format `recjson` to format table."""
from invenio.dbquery import run_sql
depends_on = ['invenio_release_1_1_0']
def info():
"""Upgrade recipe information."""
return "New format recjson to format table."
def do_upgrade():
"""Upgrade recipe procedure."""
if not run_sql("SELECT id FROM format WHERE code='recjson'"):
run_sql("INSERT INTO format "
"(name,code,description,content_type,visibility) "
"VALUES ('recjson','recjson', 'recjson record representation',"
"'application/json', 0)")
def estimate():
"""Upgrade recipe time estimate."""
return 1
| gpl-2.0 |
spthaolt/VTK | Examples/Modelling/Python/expCos.py | 8 | 2383 | #!/usr/bin/env python
# This example demonstrates how to use a programmable filter and how
# to use the special vtkDataSetToDataSet::GetOutputPort() methods
import vtk
from math import *
# We create a 100 by 100 point plane to sample
plane = vtk.vtkPlaneSource()
plane.SetXResolution(100)
plane.SetYResolution(100)
# We transform the plane by a factor of 10 on X and Y
transform = vtk.vtkTransform()
transform.Scale(10, 10, 1)
transF = vtk.vtkTransformPolyDataFilter()
transF.SetInputConnection(plane.GetOutputPort())
transF.SetTransform(transform)
# Compute the function exp(-r)*cos(10*r) and its derivative. We'll use a
# programmable filter for this. Note the unusual GetPolyDataInput() &
# GetOutputPort() methods.
besselF = vtk.vtkProgrammableFilter()
besselF.SetInputConnection(transF.GetOutputPort())
# The SetExecuteMethod takes a Python function as an argument
# In here is where all the processing is done.
def bessel():
input = besselF.GetPolyDataInput()
numPts = input.GetNumberOfPoints()
newPts = vtk.vtkPoints()
derivs = vtk.vtkFloatArray()
for i in range(0, numPts):
x = input.GetPoint(i)
x0, x1 = x[:2]
r = sqrt(x0*x0+x1*x1)
x2 = exp(-r)*cos(10.0*r)
deriv = -exp(-r)*(cos(10.0*r)+10.0*sin(10.0*r))
newPts.InsertPoint(i, x0, x1, x2)
derivs.InsertValue(i, deriv)
besselF.GetPolyDataOutput().CopyStructure(input)
besselF.GetPolyDataOutput().SetPoints(newPts)
besselF.GetPolyDataOutput().GetPointData().SetScalars(derivs)
besselF.SetExecuteMethod(bessel)
# We warp the plane based on the scalar values calculated above
warp = vtk.vtkWarpScalar()
warp.SetInput(besselF.GetPolyDataOutput())
warp.XYPlaneOn()
warp.SetScaleFactor(0.5)
# We create a mapper and actor as usual. In the case we adjust the
# scalar range of the mapper to match that of the computed scalars
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(warp.GetPolyDataOutput())
mapper.SetScalarRange(besselF.GetPolyDataOutput().GetScalarRange())
carpet = vtk.vtkActor()
carpet.SetMapper(mapper)
# Create the RenderWindow, Renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(carpet)
renWin.SetSize(500, 500)
ren.ResetCamera()
ren.GetActiveCamera().Zoom(1.5)
iren.Initialize()
renWin.Render()
iren.Start()
| bsd-3-clause |
Karaage-Cluster/karaage | karaage/projects/utils.py | 2 | 1729 | # Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
from karaage.machines.models import Account
from karaage.projects.models import Project
def add_user_to_project(person, project):
if not person.has_account():
Account.create(person, project)
project.group.members.add(person)
def remove_user_from_project(person, project):
project.group.members.remove(person)
def get_new_pid(institute):
""" Return a new Project ID
Keyword arguments:
    institute -- the Institute whose name supplies the PID prefix
"""
number = '0001'
prefix = 'p%s' % institute.name.replace(' ', '')[:4]
found = True
while found:
try:
Project.objects.get(pid=prefix + number)
number = str(int(number) + 1)
if len(number) == 1:
number = '000' + number
elif len(number) == 2:
number = '00' + number
elif len(number) == 3:
number = '0' + number
except Project.DoesNotExist:
found = False
return prefix + number
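# A minimal sketch (not part of the original Karaage source) showing just the
# prefix/zero-padding behaviour of get_new_pid() without touching the database;
# the institute name "Melbourne" and the existing PIDs are assumptions used
# only for illustration.
def _example_pid_sequence():
    name = 'Melbourne'
    prefix = 'p%s' % name.replace(' ', '')[:4]   # -> 'pMelb'
    taken = {prefix + '0001', prefix + '0002'}   # pretend these PIDs exist
    number = 1
    while prefix + '%04d' % number in taken:
        number += 1
    return prefix + '%04d' % number              # -> 'pMelb0003'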
| gpl-3.0 |
Omegaphora/external_chromium_org_tools_gyp | pylib/gyp/xcode_ninja.py | 22 | 10034 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode-ninja wrapper project file generator.
This updates the data structures passed to the Xcode gyp generator to build
with ninja instead. The Xcode project itself is transformed into a list of
executable targets, each with a build step to build with ninja, and a target
with every source and resource file. This appears to sidestep some of the
major performance headaches experienced when using complex projects and large
numbers of targets within Xcode.
"""
import errno
import gyp.generator.ninja
import os
import re
import xml.sax.saxutils
def _WriteWorkspace(main_gyp, sources_gyp):
""" Create a workspace to wrap main and sources gyp paths. """
(build_file_root, build_file_ext) = os.path.splitext(main_gyp)
workspace_path = build_file_root + '.xcworkspace'
try:
os.makedirs(workspace_path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
'<Workspace version = "1.0">\n'
for gyp_name in [main_gyp, sources_gyp]:
name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
name = xml.sax.saxutils.quoteattr("group:" + name)
output_string += ' <FileRef location = %s></FileRef>\n' % name
output_string += '</Workspace>\n'
workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")
try:
with open(workspace_file, 'r') as input_file:
input_string = input_file.read()
if input_string == output_string:
return
except IOError:
# Ignore errors if the file doesn't exist.
pass
with open(workspace_file, 'w') as output_file:
output_file.write(output_string)
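# For illustration (file names are hypothetical, not from the upstream gyp
# source): calling _WriteWorkspace('main.ninja.gyp', 'sources_for_indexing.gyp')
# creates main.ninja.xcworkspace/contents.xcworkspacedata containing roughly:
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <Workspace version = "1.0">
#     <FileRef location = "group:main.ninja.xcodeproj"></FileRef>
#     <FileRef location = "group:sources_for_indexing.xcodeproj"></FileRef>
#   </Workspace>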
def _TargetFromSpec(old_spec, params):
""" Create fake target for xcode-ninja wrapper. """
# Determine ninja top level build dir (e.g. /path/to/out).
ninja_toplevel = None
jobs = 0
if params:
options = params['options']
ninja_toplevel = \
os.path.join(options.toplevel_dir,
gyp.generator.ninja.ComputeOutputDir(params))
jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)
target_name = old_spec.get('target_name')
product_name = old_spec.get('product_name', target_name)
ninja_target = {}
ninja_target['target_name'] = target_name
ninja_target['product_name'] = product_name
ninja_target['toolset'] = old_spec.get('toolset')
ninja_target['default_configuration'] = old_spec.get('default_configuration')
ninja_target['configurations'] = {}
# Tell Xcode to look in |ninja_toplevel| for build products.
new_xcode_settings = {}
if ninja_toplevel:
new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
"%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel
if 'configurations' in old_spec:
for config in old_spec['configurations'].iterkeys():
old_xcode_settings = \
old_spec['configurations'][config].get('xcode_settings', {})
if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
ninja_target['configurations'][config] = {}
ninja_target['configurations'][config]['xcode_settings'] = \
new_xcode_settings
ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
ninja_target['type'] = old_spec['type']
if ninja_toplevel:
ninja_target['actions'] = [
{
'action_name': 'Compile and copy %s via ninja' % target_name,
'inputs': [],
'outputs': [],
'action': [
'env',
'PATH=%s' % os.environ['PATH'],
'ninja',
'-C',
new_xcode_settings['CONFIGURATION_BUILD_DIR'],
target_name,
],
'message': 'Compile and copy %s via ninja' % target_name,
},
]
if jobs > 0:
ninja_target['actions'][0]['action'].extend(('-j', jobs))
return ninja_target
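# A minimal sketch (not part of the upstream gyp source) of what
# _TargetFromSpec produces for a hypothetical spec; with params=None there is
# no ninja top-level dir, so no 'actions' entry is generated and only the
# pass-through fields are copied.
def _example_target_from_spec():
  fake_spec = {
      'target_name': 'hello',
      'product_name': 'hello',
      'type': 'executable',
      'toolset': 'target',
      'default_configuration': 'Default',
      'configurations': {'Default': {}},
  }
  target = _TargetFromSpec(fake_spec, None)
  assert target['type'] == 'executable'
  assert target['configurations'] == {'Default': {'xcode_settings': {}}}
  return target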
def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
"""Limit targets for Xcode wrapper.
Xcode sometimes performs poorly with too many targets, so only include
proper executable targets, with filters to customize.
Arguments:
target_extras: Regular expression to always add, matching any target.
executable_target_pattern: Regular expression limiting executable targets.
spec: Specifications for target.
"""
target_name = spec.get('target_name')
# Always include targets matching target_extras.
if target_extras is not None and re.search(target_extras, target_name):
return True
# Otherwise just show executable targets.
if spec.get('type', '') == 'executable' and \
spec.get('product_extension', '') != 'bundle':
# If there is a filter and the target does not match, exclude the target.
if executable_target_pattern is not None:
if not re.search(executable_target_pattern, target_name):
return False
return True
return False
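# A small sketch (not part of the upstream gyp source) illustrating the
# filtering rules above; the target names and the pattern are assumptions made
# up for the example.
def _example_is_valid_target():
  exe_spec = {'target_name': 'app_main', 'type': 'executable'}
  lib_spec = {'target_name': 'helper_lib', 'type': 'static_library'}
  # Executables matching the pattern are kept; non-executables are dropped.
  assert IsValidTargetForWrapper(None, r'^app_', exe_spec)
  assert not IsValidTargetForWrapper(None, r'^app_', lib_spec)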
def CreateWrapper(target_list, target_dicts, data, params):
"""Initialize targets for the ninja wrapper.
This sets up the necessary variables in the targets to generate Xcode projects
that use ninja as an external builder.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dict of flattened build files keyed on gyp path.
params: Dict of global options for gyp.
"""
orig_gyp = params['build_files'][0]
for gyp_name, gyp_dict in data.iteritems():
if gyp_name == orig_gyp:
depth = gyp_dict['_DEPTH']
# Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
# and prepend .ninja before the .gyp extension.
generator_flags = params.get('generator_flags', {})
main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
if main_gyp is None:
(build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
main_gyp = build_file_root + ".ninja" + build_file_ext
# Create new |target_list|, |target_dicts| and |data| data structures.
new_target_list = []
new_target_dicts = {}
new_data = {}
# Set base keys needed for |data|.
new_data[main_gyp] = {}
new_data[main_gyp]['included_files'] = []
new_data[main_gyp]['targets'] = []
new_data[main_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
# Normally the xcode-ninja generator includes only valid executable targets.
# If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
# executable targets that match the pattern. (Default all)
executable_target_pattern = \
generator_flags.get('xcode_ninja_executable_target_pattern', None)
# For including other non-executable targets, add the matching target name
# to the |xcode_ninja_target_pattern| regular expression. (Default none)
target_extras = generator_flags.get('xcode_ninja_target_pattern', None)
for old_qualified_target in target_list:
spec = target_dicts[old_qualified_target]
if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
# Add to new_target_list.
target_name = spec.get('target_name')
new_target_name = '%s:%s#target' % (main_gyp, target_name)
new_target_list.append(new_target_name)
# Add to new_target_dicts.
new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)
# Add to new_data.
for old_target in data[old_qualified_target.split(':')[0]]['targets']:
if old_target['target_name'] == target_name:
new_data_target = {}
new_data_target['target_name'] = old_target['target_name']
new_data_target['toolset'] = old_target['toolset']
new_data[main_gyp]['targets'].append(new_data_target)
# Create sources target.
sources_target_name = 'sources_for_indexing'
sources_target = _TargetFromSpec(
{ 'target_name' : sources_target_name,
'toolset': 'target',
'default_configuration': 'Default',
'mac_bundle': '0',
'type': 'executable'
}, None)
# Tell Xcode to look everywhere for headers.
sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }
sources = []
for target, target_dict in target_dicts.iteritems():
base = os.path.dirname(target)
files = target_dict.get('sources', []) + \
target_dict.get('mac_bundle_resources', [])
# Remove files starting with $. These are mostly intermediate files for the
# build system.
files = [ file for file in files if not file.startswith('$')]
# Make sources relative to root build file.
relative_path = os.path.dirname(main_gyp)
sources += [ os.path.relpath(os.path.join(base, file), relative_path)
for file in files ]
sources_target['sources'] = sorted(set(sources))
  # Put sources_for_indexing in its own gyp.
sources_gyp = \
os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
fully_qualified_target_name = \
'%s:%s#target' % (sources_gyp, sources_target_name)
# Add to new_target_list, new_target_dicts and new_data.
new_target_list.append(fully_qualified_target_name)
new_target_dicts[fully_qualified_target_name] = sources_target
new_data_target = {}
new_data_target['target_name'] = sources_target['target_name']
new_data_target['_DEPTH'] = depth
new_data_target['toolset'] = "target"
new_data[sources_gyp] = {}
new_data[sources_gyp]['targets'] = []
new_data[sources_gyp]['included_files'] = []
new_data[sources_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
new_data[sources_gyp]['targets'].append(new_data_target)
# Write workspace to file.
_WriteWorkspace(main_gyp, sources_gyp)
return (new_target_list, new_target_dicts, new_data)
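# For illustration (paths are hypothetical): given a build file 'all.gyp' and a
# qualified target 'base/base.gyp:base' that passes IsValidTargetForWrapper,
# CreateWrapper returns a target list containing 'all.ninja.gyp:base#target'
# plus 'sources_for_indexing.gyp:sources_for_indexing#target', and writes
# all.ninja.xcworkspace wrapping the two generated projects.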
| bsd-3-clause |
samklr/spark-timeseries | python/sparkts/test/test_timeseriesrdd.py | 6 | 5407 | from test_utils import PySparkTestCase
from sparkts.timeseriesrdd import *
from sparkts.timeseriesrdd import _TimeSeriesSerializer
from sparkts.datetimeindex import *
import pandas as pd
import numpy as np
from unittest import TestCase
from io import BytesIO
from pyspark.sql import SQLContext
class TimeSeriesSerializerTestCase(TestCase):
def test_times_series_serializer(self):
serializer = _TimeSeriesSerializer()
stream = BytesIO()
series = [('abc', np.array([4.0, 4.0, 5.0])), ('123', np.array([1.0, 2.0, 3.0]))]
serializer.dump_stream(iter(series), stream)
stream.seek(0)
reconstituted = list(serializer.load_stream(stream))
self.assertEquals(reconstituted[0][0], series[0][0])
self.assertEquals(reconstituted[1][0], series[1][0])
self.assertTrue((reconstituted[0][1] == series[0][1]).all())
self.assertTrue((reconstituted[1][1] == series[1][1]).all())
class TimeSeriesRDDTestCase(PySparkTestCase):
def test_time_series_rdd(self):
freq = DayFrequency(1, self.sc)
start = '2015-04-09'
dt_index = uniform(start, periods=10, freq=freq, sc=self.sc)
vecs = [np.arange(0, 10), np.arange(10, 20), np.arange(20, 30)]
rdd = self.sc.parallelize(vecs).map(lambda x: (str(x[0]), x))
tsrdd = TimeSeriesRDD(dt_index, rdd)
self.assertEquals(tsrdd.count(), 3)
contents = tsrdd.collectAsMap()
self.assertEquals(len(contents), 3)
self.assertTrue((contents["0"] == np.arange(0, 10)).all())
self.assertTrue((contents["10"] == np.arange(10, 20)).all())
self.assertTrue((contents["20"] == np.arange(20, 30)).all())
subslice = tsrdd['2015-04-10':'2015-04-15']
self.assertEquals(subslice.index(), uniform('2015-04-10', periods=6, freq=freq, sc=self.sc))
contents = subslice.collectAsMap()
self.assertEquals(len(contents), 3)
self.assertTrue((contents["0"] == np.arange(1, 7)).all())
self.assertTrue((contents["10"] == np.arange(11, 17)).all())
self.assertTrue((contents["20"] == np.arange(21, 27)).all())
def test_to_instants(self):
vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]
labels = ['a', 'b', 'c', 'd', 'e']
start = '2015-4-9'
dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)
rdd = self.sc.parallelize(zip(labels, vecs), 3)
tsrdd = TimeSeriesRDD(dt_index, rdd)
samples = tsrdd.to_instants().collect()
target_dates = ['2015-4-9', '2015-4-10', '2015-4-11', '2015-4-12']
self.assertEquals([x[0] for x in samples], [pd.Timestamp(x) for x in target_dates])
self.assertTrue((samples[0][1] == np.arange(0, 20, 4)).all())
self.assertTrue((samples[1][1] == np.arange(1, 20, 4)).all())
self.assertTrue((samples[2][1] == np.arange(2, 20, 4)).all())
self.assertTrue((samples[3][1] == np.arange(3, 20, 4)).all())
def test_to_observations(self):
sql_ctx = SQLContext(self.sc)
vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]
labels = ['a', 'b', 'c', 'd', 'e']
start = '2015-4-9'
dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)
print(dt_index._jdt_index.size())
rdd = self.sc.parallelize(zip(labels, vecs), 3)
tsrdd = TimeSeriesRDD(dt_index, rdd)
obsdf = tsrdd.to_observations_dataframe(sql_ctx)
tsrdd_from_df = time_series_rdd_from_observations( \
dt_index, obsdf, 'timestamp', 'key', 'value')
ts1 = tsrdd.collect()
ts1.sort(key = lambda x: x[0])
ts2 = tsrdd_from_df.collect()
ts2.sort(key = lambda x: x[0])
self.assertTrue(all([pair[0][0] == pair[1][0] and (pair[0][1] == pair[1][1]).all() \
for pair in zip(ts1, ts2)]))
df1 = obsdf.collect()
df1.sort(key = lambda x: x.value)
df2 = tsrdd_from_df.to_observations_dataframe(sql_ctx).collect()
df2.sort(key = lambda x: x.value)
self.assertEquals(df1, df2)
def test_filter(self):
vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]
labels = ['a', 'b', 'c', 'd', 'e']
start = '2015-4-9'
dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)
rdd = self.sc.parallelize(zip(labels, vecs), 3)
tsrdd = TimeSeriesRDD(dt_index, rdd)
filtered = tsrdd.filter(lambda x: x[0] == 'a' or x[0] == 'b')
self.assertEquals(filtered.count(), 2)
# assert it has TimeSeriesRDD functionality:
filtered['2015-04-10':'2015-04-15'].count()
def test_to_pandas_series_rdd(self):
vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]
labels = ['a', 'b', 'c', 'd', 'e']
start = '2015-4-9'
dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)
rdd = self.sc.parallelize(zip(labels, vecs), 3)
tsrdd = TimeSeriesRDD(dt_index, rdd)
series_arr = tsrdd.to_pandas_series_rdd().collect()
pd_index = dt_index.to_pandas_index()
self.assertEquals(len(vecs), len(series_arr))
for i in xrange(len(vecs)):
self.assertEquals(series_arr[i][0], labels[i])
self.assertTrue(pd.Series(vecs[i], pd_index).equals(series_arr[i][1]))
| apache-2.0 |
janslow/boto | boto/s3/key.py | 22 | 82475 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import email.utils
import errno
import hashlib
import mimetypes
import os
import re
import base64
import binascii
import math
from hashlib import md5
import boto.utils
from boto.compat import BytesIO, six, urllib, encodebytes
from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5, compute_hash
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type",
"x-robots-tag", "expires"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
# Metadata fields, whether user-settable or not, other than custom
# metadata fields (i.e., those beginning with a provider specific prefix
# like x-amz-meta).
base_fields = (base_user_settable_fields |
set(["last-modified", "content-length", "date", "etag"]))
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.content_disposition = None
self.content_language = None
self.filename = None
self.etag = None
self.is_latest = False
self.last_modified = None
self.owner = None
self._storage_class = None
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
# If the object is being restored, this attribute will be set to True.
# If the object is restored, it will be set to False. Otherwise this
# value will be None. If the restore is completed (ongoing_restore =
# False), the expiry_date will be populated with the expiry date of the
# restored object.
self.ongoing_restore = None
self.expiry_date = None
self.local_hashes = {}
def __repr__(self):
if self.bucket:
name = u'<Key: %s,%s>' % (self.bucket.name, self.name)
else:
name = u'<Key: None,%s>' % self.name
# Encode to bytes for Python 2 to prevent display decoding issues
if not isinstance(name, str):
name = name.encode('utf-8')
return name
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def _get_key(self):
return self.name
def _set_key(self, value):
self.name = value
    key = property(_get_key, _set_key)
def _get_md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_hex(self.local_hashes['md5'])
def _set_md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_hex(value)
elif 'md5' in self.local_hashes:
self.local_hashes.pop('md5', None)
    md5 = property(_get_md5, _set_md5)
def _get_base64md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
md5 = self.local_hashes['md5']
if not isinstance(md5, bytes):
md5 = md5.encode('utf-8')
return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')
def _set_base64md5(self, value):
if value:
if not isinstance(value, six.string_types):
value = value.decode('utf-8')
self.local_hashes['md5'] = binascii.a2b_base64(value)
elif 'md5' in self.local_hashes:
del self.local_hashes['md5']
    base64md5 = property(_get_base64md5, _set_base64md5)
def _get_storage_class(self):
if self._storage_class is None and self.bucket:
# Attempt to fetch storage class
list_items = list(self.bucket.list(self.name.encode('utf-8')))
if len(list_items) and getattr(list_items[0], '_storage_class',
None):
self._storage_class = list_items[0]._storage_class
else:
# Key is not yet saved? Just use default...
self._storage_class = 'STANDARD'
return self._storage_class
def _set_storage_class(self, value):
self._storage_class = value
storage_class = property(_get_storage_class, _set_storage_class)
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
digest = binascii.unhexlify(md5_hexdigest)
base64md5 = encodebytes(digest)
        if base64md5[-1:] == b'\n':
base64md5 = base64md5[0:-1]
return (md5_hexdigest, base64md5)
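    # Illustrative example (the value shown is the well-known MD5 of an empty
    # string, used only for demonstration; this snippet is not part of boto):
    #
    #     k = Key()
    #     hex_md5, b64_md5 = k.get_md5_from_hexdigest(
    #         'd41d8cd98f00b204e9800998ecf8427e')
    #     # b64_md5 is the base64 form of the raw digest bytes,
    #     # i.e. '1B2M2Y8AsgTpgAmY7PhCfg=='.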
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(
provider.server_side_encryption_header, None)
else:
self.encrypted = None
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it
# means that it represents an explicit version and the user is
# doing a get_contents_*(version_id=<foo>) to retrieve another
# version of the Key. In that case, we don't really want to
# overwrite the version_id in this Key object. Comprende?
if self.version_id is None or force:
self.version_id = resp.getheader(provider.version_id, None)
self.source_version_id = resp.getheader(provider.copy_source_version_id,
None)
if resp.getheader(provider.delete_marker, 'false') == 'true':
self.delete_marker = True
else:
self.delete_marker = False
def handle_restore_headers(self, response):
provider = self.bucket.connection.provider
header = response.getheader(provider.restore_header)
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
def handle_addl_headers(self, headers):
"""
Used by Key subclasses to do additional, provider-specific
processing of response headers. No-op for this base class.
"""
pass
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.resp is None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 199 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() in Key.base_fields:
self.__dict__[name.lower().replace('-', '_')] = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
self.handle_restore_headers(self.resp)
self.handle_addl_headers(self.resp.getheaders())
def open_write(self, headers=None, override_num_retries=None):
"""
Open this key for writing.
Not yet implemented
:type headers: dict
:param headers: Headers to pass in the write request
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self, fast=False):
"""
Close this key.
:type fast: bool
:param fast: True if you want the connection to be closed without first
reading the content. This should only be used in cases where subsequent
calls don't need to return the content from the open HTTP connection.
Note: As explained at
http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
callers must read the whole response before sending a new request to the
server. Calling Key.close(fast=True) and making a subsequent request to
the server will work because boto will get an httplib exception and
close/reopen the connection.
"""
if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
# Python 3 iterator support
__next__ = next
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
def change_storage_class(self, new_storage_class, dst_bucket=None,
validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
        all metadata and ACL info but changing the storage class, or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key will be used.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
bucket_name = dst_bucket or self.bucket.name
if new_storage_class == 'STANDARD':
return self.copy(bucket_name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(bucket_name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
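    # Illustrative usage (bucket and key names are placeholders; this snippet
    # is not part of boto):
    #
    #     key = bucket.get_key('logs/2014-01-01.gz')
    #     key.change_storage_class('REDUCED_REDUNDANCY')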
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the
storage class of the new Key to be REDUCED_REDUNDANCY
regardless of the storage class of the key being copied.
The Reduced Redundancy Storage (RRS) feature of S3,
provides lower redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to S3, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL, a value of False will be
significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket,
validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key,
src_version_id=self.version_id)
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self, headers=None):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name, headers=headers))
def delete(self, headers=None):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id,
headers=headers)
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
# Ensure that metadata that is vital to signing is in the correct
# case. Applies to ``Content-Type`` & ``Content-MD5``.
if name.lower() == 'content-type':
self.metadata['Content-Type'] = value
elif name.lower() == 'content-md5':
self.metadata['Content-MD5'] = value
else:
self.metadata[name] = value
if name.lower() in Key.base_user_settable_fields:
self.__dict__[name.lower().replace('-', '_')] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket is not None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location, headers=None):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
if headers is None:
headers = {}
else:
headers = headers.copy()
headers['x-amz-website-redirect-location'] = redirect_location
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None,
policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds.
:type method: string
:param method: The method to use for retrieving the file
(default is GET).
:type headers: dict
:param headers: Any headers to pass along in the request.
:type query_auth: bool
:param query_auth: If True, signs the request in the URL.
:type force_http: bool
:param force_http: If True, http will be used instead of https.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type expires_in_absolute: bool
:param expires_in_absolute:
:type version_id: string
:param version_id: The version_id of the object to GET. If specified
this overrides any value in the key.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:rtype: string
:return: The URL to access the key
"""
provider = self.bucket.connection.provider
version_id = version_id or self.version_id
if headers is None:
headers = {}
else:
headers = headers.copy()
# add headers accordingly (usually PUT case)
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute,
version_id)
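    # Illustrative usage (assumes an existing key on a connected bucket; the
    # key name is a placeholder and this snippet is not part of boto):
    #
    #     key = bucket.get_key('reports/summary.csv')
    #     signed_url = key.generate_url(3600)              # valid for 1 hour
    #     plain_url = key.generate_url(0, query_auth=False,
    #                                  force_http=True)    # unsigned URL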
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
            point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
            size parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: (optional) Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
# If hash_algs is unset and the MD5 hasn't already been computed,
# default to an MD5 hash_alg to hash the data on-the-fly.
if hash_algs is None and not self.md5:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
# If the caller explicitly specified host header, tell putrequest
# not to add a second host header. Similarly for accept-encoding.
skips = {}
if boto.utils.find_matching_headers('host', headers):
skips['skip_host'] = 1
if boto.utils.find_matching_headers('accept-encoding', headers):
skips['skip_accept_encoding'] = 1
http_conn.putrequest(method, path, **skips)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(
math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
for alg in digesters:
digesters[alg].update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
self.size = data_len
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
if not self.should_retry(response, chunked_transfer):
raise provider.storage_response_error(
response.status, response.reason, body)
return response
if not headers:
headers = {}
else:
headers = headers.copy()
# Overwrite user-supplied user-agent.
for header in find_matching_headers('User-Agent', headers):
del headers[header]
headers['User-Agent'] = UserAgent
# If storage_class is None, then a user has not explicitly requested
# a storage class, so we can assume STANDARD here
if self._storage_class not in [None, 'STANDARD']:
headers[provider.storage_class_header] = self.storage_class
if find_matching_headers('Content-Encoding', headers):
self.content_encoding = merge_headers_by_name(
'Content-Encoding', headers)
if find_matching_headers('Content-Language', headers):
self.content_language = merge_headers_by_name(
'Content-Language', headers)
content_type_headers = find_matching_headers('Content-Type', headers)
if content_type_headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if (len(content_type_headers) == 1 and
headers[content_type_headers[0]] is None):
# Delete null Content-Type value to skip sending that header.
del headers[content_type_headers[0]]
else:
self.content_type = merge_headers_by_name(
'Content-Type', headers)
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type is None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
# This is terrible. We need a SHA256 of the body for SigV4, but to do
# the chunked ``sender`` behavior above, the ``fp`` isn't available to
        # the auth mechanism (because of closures). Detect if it's SigV4 & embellish
# while we can before the auth calculations occur.
if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability():
kwargs = {'fp': fp, 'hash_algorithm': hashlib.sha256}
if size is not None:
kwargs['size'] = size
headers['_sha256'] = compute_hash(**kwargs)[0]
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request(
'PUT',
self.bucket.name,
self.name,
headers,
sender=sender,
query_args=query_args
)
self.handle_version_headers(resp, force=True)
self.handle_addl_headers(resp.getheaders())
def should_retry(self, response, chunked_transfer=False):
provider = self.bucket.connection.provider
if not chunked_transfer:
if response.status in [500, 503]:
# 500 & 503 can be plain retries.
return True
if response.getheader('location'):
# If there's a redirect, plain retry.
return True
if 200 <= response.status <= 299:
self.etag = response.getheader('etag')
md5 = self.md5
if isinstance(md5, bytes):
md5 = md5.decode('utf-8')
# If you use customer-provided encryption keys, the ETag value that
# Amazon S3 returns in the response will not be the MD5 of the
# object.
server_side_encryption_customer_algorithm = response.getheader(
'x-amz-server-side-encryption-customer-algorithm', None)
if server_side_encryption_customer_algorithm is None:
if self.etag != '"%s"' % md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5. '
'%s vs. %s' % (self.etag, self.md5))
return True
if response.status == 400:
# The 400 must be trapped so the retry handler can check to
# see if it was a timeout.
# If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
# out.
body = response.read()
err = provider.storage_response_error(
response.status,
response.reason,
body
)
if err.error_code in ['RequestTimeout']:
raise PleaseRetryException(
"Saw %s, retrying" % err.error_code,
response=response
)
return False
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
            in place into different parts. Fewer bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
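    # A minimal sketch (an in-memory file object is used purely for
    # illustration; this snippet is not part of boto):
    #
    #     from io import BytesIO
    #     k = Key()
    #     hex_md5, b64_md5 = k.compute_md5(BytesIO(b'example data'))
    #     # k.size is now 12 and the stream position is restored to 0.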
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None,
size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
Content-Size and Content-MD5 in the header. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket is not None:
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, size=size)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False, size=None, rewind=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents. The data is read from 'fp' from its current position until
'size' bytes have been read or EOF.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will
first check to see if an object exists in the bucket with
the same key. If it does, it won't overwrite it. The
default value is True which will overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will
be rewound to the start before any bytes are read from
it. The default behaviour is False which reads from the
current position of the file pointer (fp).
:rtype: int
:return: The number of bytes written to the key.
"""
provider = self.bucket.connection.provider
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
                # TODO - What if provider doesn't support reduced redundancy?
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if not md5 and provider.supports_chunked_transfer():
# defer md5 calculation to on the fly and
# we don't know anything about size yet.
chunked_transfer = True
self.size = None
else:
chunked_transfer = False
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if not md5:
                    # compute_md5() also sets self.size to the actual
                    # number of bytes read while computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
                    # If md5 is provided, we still need the size, so
                    # calculate it based on the bytes to the end of content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
# return number of bytes written.
return self.size
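# Usage sketch (hypothetical objects, not part of boto): if an earlier
# consumer has already advanced the file pointer, pass rewind=True so the
# upload starts from byte 0 rather than the current position:
#
#     fp = open('/tmp/example-data.bin', 'rb')
#     fp.read(1024)                                  # pointer is no longer at 0
#     key.set_contents_from_file(fp, rewind=True)    # uploads the whole file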
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the key
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object
will be encrypted on the server-side by S3 and will be
stored in an encrypted form while at rest in S3.
:rtype: int
:return: The number of bytes written to the key.
"""
with open(filename, 'rb') as fp:
return self.set_contents_from_file(fp, headers, replace, cb,
num_cb, policy, md5,
reduced_redundancy,
encrypt_key=encrypt_key)
def set_contents_from_string(self, string_data, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 'string_data' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the key if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
"""
if not isinstance(string_data, bytes):
string_data = string_data.encode("utf-8")
fp = BytesIO(string_data)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
:type headers: dict
:param headers: headers to send when retrieving the file
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=None,
query_args=None)
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None, query_args=None):
if headers is None:
headers = {}
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = query_args or []
if torrent:
query_args.append('torrent')
if hash_algs is None and not torrent:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (
key, urllib.parse.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
data_len = 0
if cb:
if self.size is None:
cb_size = 0
else:
cb_size = self.size
if self.size is None and num_cb != -1:
# If size is not available due to chunked transfer for example,
# we'll call the cb for every 1MB of data transferred.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
try:
for bytes in self:
fp.write(bytes)
data_len += len(bytes)
for alg in digesters:
digesters[alg].update(bytes)
if cb:
if cb_size > 0 and data_len >= cb_size:
break
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
except IOError as e:
if e.errno == errno.ENOSPC:
raise StorageDataError('Out of space for destination file '
'%s' % fp.name)
raise
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
self.bucket.connection.debug = save_debug
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
Get a torrent file (see get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: file-like object
:param fp: the file object to write the object's contents to
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
:type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
try:
with open(filename, 'wb') as fp:
self.get_contents_to_file(fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers)
except Exception:
os.remove(filename)
raise
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified is not None:
try:
modified_tuple = email.utils.parsedate_tz(self.last_modified)
modified_stamp = int(email.utils.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception:
pass
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None, encoding=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
:type encoding: str
:param encoding: The text encoding to use, such as ``utf-8``
or ``iso-8859-1``. If set, then a string will be returned.
Defaults to ``None`` and returns bytes.
:rtype: bytes or str
:returns: The contents of the file as bytes or a string
"""
fp = BytesIO()
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
value = fp.getvalue()
if encoding is not None:
value = value.decode(encoding)
return value
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
account you are granting the permission to.
:type recursive: boolean
:param recursive: A boolean value that controls whether the
command will apply the grant to all keys within the bucket
or not. The default value is False. By passing a True
value, the call will iterate through all keys in the
bucket and apply the same grant to each key. CAUTION: If
you have a lot of keys, this could take a long time!
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
account you are granting the permission to.
:type display_name: string
:param display_name: An optional string containing the user's
Display Name. Only required on Walrus.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
def _normalize_metadata(self, metadata):
if type(metadata) == set:
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
"""
Extracts metadata from existing URI into a dict, so we can
overwrite/delete from it to form the new set of metadata to apply to a
key.
"""
metadata = {}
for underscore_name in self._underscore_base_user_settable_fields:
if hasattr(self, underscore_name):
value = getattr(self, underscore_name)
if value:
# Generate HTTP field name corresponding to "_" named field.
field_name = underscore_name.replace('_', '-')
metadata[field_name.lower()] = value
# self.metadata contains custom metadata, which are all user-settable.
prefix = self.provider.metadata_prefix
for underscore_name in self.metadata:
field_name = underscore_name.replace('_', '-')
metadata['%s%s' % (prefix, field_name.lower())] = (
self.metadata[underscore_name])
return metadata
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
headers=None):
metadata_plus = self._normalize_metadata(metadata_plus)
metadata_minus = self._normalize_metadata(metadata_minus)
metadata = self._get_remote_metadata()
metadata.update(metadata_plus)
for h in metadata_minus:
if h in metadata:
del metadata[h]
src_bucket = self.bucket
# Boto prepends the meta prefix when adding headers, so strip prefix in
# metadata before sending back in to copy_key() call.
rewritten_metadata = {}
for h in metadata:
if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
rewritten_h = (h.replace('x-goog-meta-', '')
.replace('x-amz-meta-', ''))
else:
rewritten_h = h
rewritten_metadata[rewritten_h] = metadata[h]
metadata = rewritten_metadata
src_bucket.copy_key(self.name, self.bucket.name, self.name,
metadata=metadata, preserve_acl=preserve_acl,
headers=headers)
def restore(self, days, headers=None):
"""Restore an object from an archive.
:type days: int
:param days: The lifetime of the restored object (must
be at least 1 day). If the object is already restored
then this parameter can be used to readjust the lifetime
of the restored object. In this case, the days
param is with respect to the initial time of the request.
If the object has not been restored, this param is with
respect to the completion time of the request.
"""
response = self.bucket.connection.make_request(
'POST', self.bucket.name, self.name,
data=self.RestoreBody % days,
headers=headers, query_args='restore')
if response.status not in (200, 202):
provider = self.bucket.connection.provider
raise provider.storage_response_error(response.status,
response.reason,
response.read())
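# --- Usage sketch (not part of boto itself) ----------------------------------
# A minimal, hedged example of the upload/download round trip defined above.
# The bucket name, key name and file paths are placeholders for illustration
# only; credentials are assumed to come from the environment.
def _example_key_roundtrip():
    import boto
    conn = boto.connect_s3()                      # uses credentials from the environment
    bucket = conn.get_bucket('example-bucket')    # hypothetical bucket name
    key = bucket.new_key('example/object.txt')    # hypothetical key name
    # Upload from a local file; returns the number of bytes written.
    written = key.set_contents_from_filename('/tmp/example-upload.txt')
    # Download the same object back to disk.
    key.get_contents_to_filename('/tmp/example-download.txt')
    return written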
| mit |
DMCsys/smartalkaudio | oss-survey/xmms2-0.8DrO_o/doc/tutorial/python/tut2.py | 1 | 1951 | #!/usr/bin/env python
# XMMS2 - X Music Multiplexer System
# Copyright (C) 2003-2006 XMMS2 Team
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# This file is a part of the XMMS2 client tutorial #2
# Here we will learn to retrieve results from a command
import xmmsclient
import os
import sys
"""
The first part of this program is
commented in tut1.py. See that one for
instructions
"""
xmms = xmmsclient.XMMS("tutorial2")
try:
xmms.connect(os.getenv("XMMS_PATH"))
except IOError, detail:
print "Connection failed:", detail
sys.exit(1)
"""
Now we send a command that will return
a result. Let's find out which entry
is currently playing.
Note that this program has to be run while
xmms2 is playing something, otherwise
XMMS.playback_current_id will return 0.
"""
result = xmms.playback_current_id()
"""
We are still doing sync operations, wait for the
answer and block.
"""
result.wait()
"""
Also this time we need to check for errors.
Errors can occur on all commands, but not signals
and broadcasts. We will talk about these later.
"""
if result.iserror():
print "playback current id returns error, %s" % result.get_error()
"""
Let's retrieve the value from the XMMSResult object.
You don't have to know what type of value is returned
in response to which command - simply call
XMMSResult.value().
In this case XMMS.playback_current_id will return a UINT
"""
id = result.value()
"""Print the value"""
print "Currently playing id is %d" % id
| gpl-3.0 |
twilio/twilio-python | twilio/rest/preview/understand/assistant/task/sample.py | 2 | 18621 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SampleList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, assistant_sid, task_sid):
"""
Initialize the SampleList
:param Version version: Version that contains the resource
:param assistant_sid: The unique ID of the Assistant.
:param task_sid: The unique ID of the Task associated with this Sample.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleList
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleList
"""
super(SampleList, self).__init__(version)
# Path Solution
self._solution = {'assistant_sid': assistant_sid, 'task_sid': task_sid, }
self._uri = '/Assistants/{assistant_sid}/Tasks/{task_sid}/Samples'.format(**self._solution)
def stream(self, language=values.unset, limit=None, page_size=None):
"""
Streams SampleInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode language: An ISO language-country string of the sample.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.understand.assistant.task.sample.SampleInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(language=language, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, language=values.unset, limit=None, page_size=None):
"""
Lists SampleInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode language: An ISO language-country string of the sample.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.understand.assistant.task.sample.SampleInstance]
"""
return list(self.stream(language=language, limit=limit, page_size=page_size, ))
def page(self, language=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of SampleInstance records from the API.
Request is executed immediately
:param unicode language: An ISO language-country string of the sample.
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SamplePage
"""
data = values.of({
'Language': language,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return SamplePage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of SampleInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SamplePage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return SamplePage(self._version, response, self._solution)
def create(self, language, tagged_text, source_channel=values.unset):
"""
Create the SampleInstance
:param unicode language: An ISO language-country string of the sample.
:param unicode tagged_text: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:param unicode source_channel: The communication channel in which the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included, the value will be null
:returns: The created SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, })
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
)
def get(self, sid):
"""
Constructs a SampleContext
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleContext
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
return SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a SampleContext
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleContext
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
return SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Understand.SampleList>'
class SamplePage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, response, solution):
"""
Initialize the SamplePage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param assistant_sid: The unique ID of the Assistant.
:param task_sid: The unique ID of the Task associated with this Sample.
:returns: twilio.rest.preview.understand.assistant.task.sample.SamplePage
:rtype: twilio.rest.preview.understand.assistant.task.sample.SamplePage
"""
super(SamplePage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SampleInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Understand.SamplePage>'
class SampleContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, assistant_sid, task_sid, sid):
"""
Initialize the SampleContext
:param Version version: Version that contains the resource
:param assistant_sid: The unique ID of the Assistant.
:param task_sid: The unique ID of the Task associated with this Sample.
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleContext
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
super(SampleContext, self).__init__(version)
# Path Solution
self._solution = {'assistant_sid': assistant_sid, 'task_sid': task_sid, 'sid': sid, }
self._uri = '/Assistants/{assistant_sid}/Tasks/{task_sid}/Samples/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch the SampleInstance
:returns: The fetched SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def update(self, language=values.unset, tagged_text=values.unset,
source_channel=values.unset):
"""
Update the SampleInstance
:param unicode language: An ISO language-country string of the sample.
:param unicode tagged_text: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:param unicode source_channel: The communication channel in which the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included, the value will be null
:returns: The updated SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, })
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the SampleInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Understand.SampleContext {}>'.format(context)
class SampleInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, payload, assistant_sid, task_sid, sid=None):
"""
Initialize the SampleInstance
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
super(SampleInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'task_sid': payload.get('task_sid'),
'language': payload.get('language'),
'assistant_sid': payload.get('assistant_sid'),
'sid': payload.get('sid'),
'tagged_text': payload.get('tagged_text'),
'url': payload.get('url'),
'source_channel': payload.get('source_channel'),
}
# Context
self._context = None
self._solution = {
'assistant_sid': assistant_sid,
'task_sid': task_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SampleContext for this SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
if self._context is None:
self._context = SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The unique ID of the Account that created this Sample.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def date_created(self):
"""
:returns: The date that this resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date that this resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def task_sid(self):
"""
:returns: The unique ID of the Task associated with this Sample.
:rtype: unicode
"""
return self._properties['task_sid']
@property
def language(self):
"""
:returns: An ISO language-country string of the sample.
:rtype: unicode
"""
return self._properties['language']
@property
def assistant_sid(self):
"""
:returns: The unique ID of the Assistant.
:rtype: unicode
"""
return self._properties['assistant_sid']
@property
def sid(self):
"""
:returns: A 34 character string that uniquely identifies this resource.
:rtype: unicode
"""
return self._properties['sid']
@property
def tagged_text(self):
"""
:returns: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:rtype: unicode
"""
return self._properties['tagged_text']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
@property
def source_channel(self):
"""
:returns: The communication channel in which the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included, the value will be null
:rtype: unicode
"""
return self._properties['source_channel']
def fetch(self):
"""
Fetch the SampleInstance
:returns: The fetched SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
return self._proxy.fetch()
def update(self, language=values.unset, tagged_text=values.unset,
source_channel=values.unset):
"""
Update the SampleInstance
:param unicode language: An ISO language-country string of the sample.
:param unicode tagged_text: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:param unicode source_channel: The communication channel in which the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included, the value will be null
:returns: The updated SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
return self._proxy.update(language=language, tagged_text=tagged_text, source_channel=source_channel, )
def delete(self):
"""
Deletes the SampleInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Understand.SampleInstance {}>'.format(context)
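# --- Usage sketch (not part of the generated bindings) -----------------------
# A hedged illustration of reaching the SampleList defined above through the
# library's Client facade. The credentials, Assistant/Task SIDs and sample text
# are placeholders, and the exact traversal path is an assumption based on the
# contexts in this file.
def _example_samples_usage():
    from twilio.rest import Client
    client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
    samples = client.preview.understand \
        .assistants('UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
        .tasks('UDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
        .samples
    # Create a sample for the task, then page through the existing ones.
    created = samples.create(language='en-US', tagged_text='book a table for two')
    for sample in samples.list(language='en-US'):
        print(sample.sid)
    return created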
| mit |
gaddman/ansible | lib/ansible/modules/network/aci/aci_config_rollback.py | 2 | 9612 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_config_rollback
short_description: Provides rollback and rollback preview functionality (config:ImportP)
description:
- Provides rollback and rollback preview functionality for Cisco ACI fabrics.
- Config rollbacks are done using snapshots created by C(aci_config_snapshot) with the configImportP class.
seealso:
- module: aci_config_snapshot
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(config:ImportP).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
compare_export_policy:
description:
- The export policy that the C(compare_snapshot) is associated to.
compare_snapshot:
description:
- The name of the snapshot to compare with C(snapshot).
description:
description:
- The description for the Import Policy.
aliases: [ descr ]
export_policy:
description:
- The export policy that the C(snapshot) is associated to.
required: yes
fail_on_decrypt:
description:
- Determines if the APIC should fail the rollback if unable to decrypt secured data.
- The APIC defaults to C(yes) when unset.
type: bool
import_mode:
description:
- Determines how the import should be handled by the APIC.
- The APIC defaults to C(atomic) when unset.
choices: [ atomic, best-effort ]
import_policy:
description:
- The name of the Import Policy to use for config rollback.
import_type:
description:
- Determines how the current and snapshot configuration should be compared for replacement.
- The APIC defaults to C(replace) when unset.
choices: [ merge, replace ]
snapshot:
description:
- The name of the snapshot to rollback to, or the base snapshot to use for comparison.
- The C(aci_config_snapshot) module can be used to query the list of available snapshots.
required: yes
state:
description:
- Use C(preview) for previewing the diff between two snapshots.
- Use C(rollback) for reverting the configuration to a previous snapshot.
choices: [ preview, rollback ]
default: rollback
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
---
- name: Create a Snapshot
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
state: present
delegate_to: localhost
- name: Query Existing Snapshots
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
state: query
delegate_to: localhost
- name: Compare Snapshot Files
aci_config_rollback:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
snapshot: run-2017-08-28T06-24-01
compare_export_policy: config_backup
compare_snapshot: run-2017-08-27T23-43-56
state: preview
delegate_to: localhost
- name: Rollback Configuration
aci_config_rollback:
host: apic
username: admin
password: SomeSecretPassword
import_policy: rollback_config
export_policy: config_backup
snapshot: run-2017-08-28T06-24-01
state: rollback
delegate_to: localhost
- name: Rollback Configuration
aci_config_rollback:
host: apic
username: admin
password: SomeSecretPassword
import_policy: rollback_config
export_policy: config_backup
snapshot: run-2017-08-28T06-24-01
description: Rollback 8-27 changes
import_mode: atomic
import_type: replace
fail_on_decrypt: yes
state: rollback
delegate_to: localhost
'''
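# Usage sketch (not part of the module documentation above): when state=preview,
# the parsed diff is returned under the 'preview' key (see RETURN below), so a
# playbook can register and display it. Policy and snapshot names here are the
# same placeholders used in EXAMPLES.
#
# - name: Preview rollback and show the diff
#   aci_config_rollback:
#     host: apic
#     username: admin
#     password: SomeSecretPassword
#     export_policy: config_backup
#     snapshot: run-2017-08-28T06-24-01
#     compare_export_policy: config_backup
#     compare_snapshot: run-2017-08-27T23-43-56
#     state: preview
#   delegate_to: localhost
#   register: rollback_preview
#
# - debug:
#     var: rollback_preview.preview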
RETURN = r'''
preview:
description: A preview between two snapshots
returned: when state is preview
type: string
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
from ansible.module_utils.urls import fetch_url
# Optional, only used for rollback preview
try:
import lxml.etree
from xmljson import cobra
XML_TO_JSON = True
except ImportError:
XML_TO_JSON = False
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
compare_export_policy=dict(type='str'),
compare_snapshot=dict(type='str'),
description=dict(type='str', aliases=['descr']),
export_policy=dict(type='str'),
fail_on_decrypt=dict(type='bool'),
import_mode=dict(type='str', choices=['atomic', 'best-effort']),
import_policy=dict(type='str'),
import_type=dict(type='str', choices=['merge', 'replace']),
snapshot=dict(type='str', required=True),
state=dict(type='str', default='rollback', choices=['preview', 'rollback']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False,
required_if=[
['state', 'preview', ['compare_export_policy', 'compare_snapshot']],
['state', 'rollback', ['import_policy']],
],
)
aci = ACIModule(module)
description = module.params['description']
export_policy = module.params['export_policy']
fail_on_decrypt = aci.boolean(module.params['fail_on_decrypt'])
import_mode = module.params['import_mode']
import_policy = module.params['import_policy']
import_type = module.params['import_type']
snapshot = module.params['snapshot']
state = module.params['state']
if state == 'rollback':
if snapshot.startswith('run-'):
snapshot = snapshot.replace('run-', '', 1)
if not snapshot.endswith('.tar.gz'):
snapshot += '.tar.gz'
filename = 'ce2_{0}-{1}'.format(export_policy, snapshot)
aci.construct_url(
root_class=dict(
aci_class='configImportP',
aci_rn='fabric/configimp-{0}'.format(import_policy),
module_object=import_policy,
target_filter={'name': import_policy},
),
)
aci.get_existing()
aci.payload(
aci_class='configImportP',
class_config=dict(
adminSt='triggered',
descr=description,
failOnDecryptErrors=fail_on_decrypt,
fileName=filename,
importMode=import_mode,
importType=import_type,
name=import_policy,
snapshot='yes',
),
)
aci.get_diff(aci_class='configImportP')
aci.post_config()
elif state == 'preview':
aci.url = '%(protocol)s://%(host)s/mqapi2/snapshots.diff.xml' % module.params
aci.filter_string = (
'?s1dn=uni/backupst/snapshots-[uni/fabric/configexp-%(export_policy)s]/snapshot-%(snapshot)s&'
's2dn=uni/backupst/snapshots-[uni/fabric/configexp-%(compare_export_policy)s]/snapshot-%(compare_snapshot)s'
) % module.params
# Generate rollback comparison
get_preview(aci)
aci.exit_json()
def get_preview(aci):
'''
This function is used to generate a preview between two snapshots and add the parsed results to the aci module return data.
'''
uri = aci.url + aci.filter_string
resp, info = fetch_url(aci.module, uri, headers=aci.headers, method='GET', timeout=aci.module.params['timeout'], use_proxy=aci.module.params['use_proxy'])
aci.method = 'GET'
aci.response = info['msg']
aci.status = info['status']
# Handle APIC response
if info['status'] == 200:
xml_to_json(aci, resp.read())
else:
aci.result['raw'] = resp.read()
aci.fail_json(msg="Request failed: %(code)s %(text)s (see 'raw' output)" % aci.error)
def xml_to_json(aci, response_data):
'''
This function is used to convert preview XML data into JSON.
'''
if XML_TO_JSON:
xml = lxml.etree.fromstring(to_bytes(response_data))
xmldata = cobra.data(xml)
aci.result['preview'] = xmldata
else:
aci.result['preview'] = response_data
if __name__ == "__main__":
main()
| gpl-3.0 |
tempbottle/kbengine | kbe/res/scripts/common/Lib/idlelib/ScrolledList.py | 76 | 4159 | from tkinter import *
import re
class ScrolledList:
default = "(None)"
def __init__(self, master, **options):
# Create top frame, with scrollbar and listbox
self.master = master
self.frame = frame = Frame(master)
self.frame.pack(fill="both", expand=1)
self.vbar = vbar = Scrollbar(frame, name="vbar")
self.vbar.pack(side="right", fill="y")
self.listbox = listbox = Listbox(frame, exportselection=0,
background="white")
if options:
listbox.configure(options)
listbox.pack(expand=1, fill="both")
# Tie listbox and scrollbar together
vbar["command"] = listbox.yview
listbox["yscrollcommand"] = vbar.set
# Bind events to the list box
listbox.bind("<ButtonRelease-1>", self.click_event)
listbox.bind("<Double-ButtonRelease-1>", self.double_click_event)
listbox.bind("<ButtonPress-3>", self.popup_event)
listbox.bind("<Key-Up>", self.up_event)
listbox.bind("<Key-Down>", self.down_event)
# Mark as empty
self.clear()
def close(self):
self.frame.destroy()
def clear(self):
self.listbox.delete(0, "end")
self.empty = 1
self.listbox.insert("end", self.default)
def append(self, item):
if self.empty:
self.listbox.delete(0, "end")
self.empty = 0
self.listbox.insert("end", str(item))
def get(self, index):
return self.listbox.get(index)
def click_event(self, event):
self.listbox.activate("@%d,%d" % (event.x, event.y))
index = self.listbox.index("active")
self.select(index)
self.on_select(index)
return "break"
def double_click_event(self, event):
index = self.listbox.index("active")
self.select(index)
self.on_double(index)
return "break"
menu = None
def popup_event(self, event):
if not self.menu:
self.make_menu()
menu = self.menu
self.listbox.activate("@%d,%d" % (event.x, event.y))
index = self.listbox.index("active")
self.select(index)
menu.tk_popup(event.x_root, event.y_root)
def make_menu(self):
menu = Menu(self.listbox, tearoff=0)
self.menu = menu
self.fill_menu()
def up_event(self, event):
index = self.listbox.index("active")
if self.listbox.selection_includes(index):
index = index - 1
else:
index = self.listbox.size() - 1
if index < 0:
self.listbox.bell()
else:
self.select(index)
self.on_select(index)
return "break"
def down_event(self, event):
index = self.listbox.index("active")
if self.listbox.selection_includes(index):
index = index + 1
else:
index = 0
if index >= self.listbox.size():
self.listbox.bell()
else:
self.select(index)
self.on_select(index)
return "break"
def select(self, index):
self.listbox.focus_set()
self.listbox.activate(index)
self.listbox.selection_clear(0, "end")
self.listbox.selection_set(index)
self.listbox.see(index)
# Methods to override for specific actions
def fill_menu(self):
pass
def on_select(self, index):
pass
def on_double(self, index):
pass
def _scrolled_list(parent):
root = Tk()
root.title("Test ScrolledList")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
class MyScrolledList(ScrolledList):
def fill_menu(self): self.menu.add_command(label="right click")
def on_select(self, index): print("select", self.get(index))
def on_double(self, index): print("double", self.get(index))
scrolled_list = MyScrolledList(root)
for i in range(30):
scrolled_list.append("Item %02d" % i)
root.mainloop()
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(_scrolled_list)
| lgpl-3.0 |
cuilishen/cuilishenMissionPlanner | Lib/lib2to3/btm_utils.py | 374 | 10011 | "Utility functions used by the btm_matcher module"
from . import pytree
from .pgen2 import grammar, token
from .pygram import pattern_symbols, python_symbols
syms = pattern_symbols
pysyms = python_symbols
tokens = grammar.opmap
token_labels = token
TYPE_ANY = -1
TYPE_ALTERNATIVES = -2
TYPE_GROUP = -3
class MinNode(object):
"""This class serves as an intermediate representation of the
pattern tree during the conversion to sets of leaf-to-root
subpatterns"""
def __init__(self, type=None, name=None):
self.type = type
self.name = name
self.children = []
self.leaf = False
self.parent = None
self.alternatives = []
self.group = []
def __repr__(self):
return str(self.type) + ' ' + str(self.name)
def leaf_to_root(self):
"""Internal method. Returns a characteristic path of the
pattern tree. This method must be run for all leaves until the
linear subpatterns are merged into a single pattern"""
node = self
subp = []
while node:
if node.type == TYPE_ALTERNATIVES:
node.alternatives.append(subp)
if len(node.alternatives) == len(node.children):
#last alternative
subp = [tuple(node.alternatives)]
node.alternatives = []
node = node.parent
continue
else:
node = node.parent
subp = None
break
if node.type == TYPE_GROUP:
node.group.append(subp)
#probably should check the number of leaves
if len(node.group) == len(node.children):
subp = get_characteristic_subpattern(node.group)
node.group = []
node = node.parent
continue
else:
node = node.parent
subp = None
break
if node.type == token_labels.NAME and node.name:
#in case of type=name, use the name instead
subp.append(node.name)
else:
subp.append(node.type)
node = node.parent
return subp
def get_linear_subpattern(self):
"""Drives the leaf_to_root method. The reason that
leaf_to_root must be run multiple times is because we need to
reject 'group' matches; for example the alternative form
(a | b c) creates a group [b c] that needs to be matched. Since
matching multiple linear patterns overcomes the automaton's
capabilities, leaf_to_root merges each group into a single
choice based on 'characteristic'ity,
i.e. (a|b c) -> (a|b) if b is more characteristic than c
Returns: The most 'characteristic'(as defined by
get_characteristic_subpattern) path for the compiled pattern
tree.
"""
for l in self.leaves():
subp = l.leaf_to_root()
if subp:
return subp
def leaves(self):
"Generator that returns the leaves of the tree"
for child in self.children:
for x in child.leaves():
yield x
if not self.children:
yield self
def reduce_tree(node, parent=None):
"""
Internal function. Reduces a compiled pattern tree to an
intermediate representation suitable for feeding the
automaton. This also trims off any optional pattern elements(like
[a], a*).
"""
new_node = None
#switch on the node type
if node.type == syms.Matcher:
#skip
node = node.children[0]
if node.type == syms.Alternatives :
#2 cases
if len(node.children) <= 2:
#just a single 'Alternative', skip this node
new_node = reduce_tree(node.children[0], parent)
else:
#real alternatives
new_node = MinNode(type=TYPE_ALTERNATIVES)
#skip odd children('|' tokens)
for child in node.children:
if node.children.index(child)%2:
continue
reduced = reduce_tree(child, new_node)
if reduced is not None:
new_node.children.append(reduced)
elif node.type == syms.Alternative:
if len(node.children) > 1:
new_node = MinNode(type=TYPE_GROUP)
for child in node.children:
reduced = reduce_tree(child, new_node)
if reduced:
new_node.children.append(reduced)
if not new_node.children:
# delete the group if all of the children were reduced to None
new_node = None
else:
new_node = reduce_tree(node.children[0], parent)
elif node.type == syms.Unit:
if (isinstance(node.children[0], pytree.Leaf) and
node.children[0].value == '('):
#skip parentheses
return reduce_tree(node.children[1], parent)
if ((isinstance(node.children[0], pytree.Leaf) and
node.children[0].value == '[')
or
(len(node.children)>1 and
hasattr(node.children[1], "value") and
node.children[1].value == '[')):
#skip whole unit if its optional
return None
leaf = True
details_node = None
alternatives_node = None
has_repeater = False
repeater_node = None
has_variable_name = False
for child in node.children:
if child.type == syms.Details:
leaf = False
details_node = child
elif child.type == syms.Repeater:
has_repeater = True
repeater_node = child
elif child.type == syms.Alternatives:
alternatives_node = child
if hasattr(child, 'value') and child.value == '=': # variable name
has_variable_name = True
#skip variable name
if has_variable_name:
#skip variable name, '='
name_leaf = node.children[2]
if hasattr(name_leaf, 'value') and name_leaf.value == '(':
# skip parenthesis
name_leaf = node.children[3]
else:
name_leaf = node.children[0]
#set node type
if name_leaf.type == token_labels.NAME:
#(python) non-name or wildcard
if name_leaf.value == 'any':
new_node = MinNode(type=TYPE_ANY)
else:
if hasattr(token_labels, name_leaf.value):
new_node = MinNode(type=getattr(token_labels, name_leaf.value))
else:
new_node = MinNode(type=getattr(pysyms, name_leaf.value))
elif name_leaf.type == token_labels.STRING:
#(python) name or character; remove the apostrophes from
#the string value
name = name_leaf.value.strip("'")
if name in tokens:
new_node = MinNode(type=tokens[name])
else:
new_node = MinNode(type=token_labels.NAME, name=name)
elif name_leaf.type == syms.Alternatives:
new_node = reduce_tree(alternatives_node, parent)
#handle repeaters
if has_repeater:
if repeater_node.children[0].value == '*':
#reduce to None
new_node = None
elif repeater_node.children[0].value == '+':
#reduce to a single occurrence, i.e. do nothing
pass
else:
#TODO: handle {min, max} repeaters
raise NotImplementedError
pass
#add children
if details_node and new_node is not None:
for child in details_node.children[1:-1]:
#skip '<', '>' markers
reduced = reduce_tree(child, new_node)
if reduced is not None:
new_node.children.append(reduced)
if new_node:
new_node.parent = parent
return new_node
def get_characteristic_subpattern(subpatterns):
"""Picks the most characteristic from a list of linear patterns
Current order used is:
names > common_names > common_chars
"""
if not isinstance(subpatterns, list):
return subpatterns
if len(subpatterns)==1:
return subpatterns[0]
# first pick out the ones containing variable names
subpatterns_with_names = []
subpatterns_with_common_names = []
common_names = ['in', 'for', 'if' , 'not', 'None']
subpatterns_with_common_chars = []
common_chars = "[]().,:"
for subpattern in subpatterns:
if any(rec_test(subpattern, lambda x: type(x) is str)):
if any(rec_test(subpattern,
lambda x: isinstance(x, str) and x in common_chars)):
subpatterns_with_common_chars.append(subpattern)
elif any(rec_test(subpattern,
lambda x: isinstance(x, str) and x in common_names)):
subpatterns_with_common_names.append(subpattern)
else:
subpatterns_with_names.append(subpattern)
if subpatterns_with_names:
subpatterns = subpatterns_with_names
elif subpatterns_with_common_names:
subpatterns = subpatterns_with_common_names
elif subpatterns_with_common_chars:
subpatterns = subpatterns_with_common_chars
# of the remaining subpatterns pick out the longest one
return max(subpatterns, key=len)
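# Illustrative sketch, not part of the original module: given two hypothetical
# linear patterns, one containing a concrete name and one containing only
# common punctuation, the name-bearing pattern is preferred:
#   get_characteristic_subpattern([['(', ')'], [token_labels.NAME, 'print']])
#   -> [token_labels.NAME, 'print']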
def rec_test(sequence, test_func):
"""Tests test_func on all items of sequence and items of included
sub-iterables"""
for x in sequence:
if isinstance(x, (list, tuple)):
for y in rec_test(x, test_func):
yield y
else:
yield test_func(x)
| gpl-3.0 |
jjshoe/ansible-modules-core | network/eos/eos_command.py | 5 | 5315 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: eos_command
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Run arbitrary command on EOS device
description:
  - Sends an arbitrary set of commands to an EOS node and returns the results
    read from the device. The M(eos_command) module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: eos
options:
commands:
description:
- The commands to send to the remote EOS device over the
configured provider. The resulting output from the command
is returned. If the I(waitfor) argument is provided, the
      module does not return until the condition is satisfied or
      the number of retries has expired.
required: true
waitfor:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured retries, the task fails. See examples.
required: false
default: null
retries:
description:
      - Specifies the number of retries a command should be tried
        before it is considered failed. The command is run on the
        target device every retry and evaluated against the waitfor
        conditionals.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
        conditional, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
- eos_command:
commands: "{{ lookup('file', 'commands.txt') }}"
- eos_command:
commands:
- show interface {{ item }}
with_items: interfaces
- eos_command:
commands:
- show version
waitfor:
- "result[0] contains 4.15.0F"
- eos_command:
commands:
- show version | json
- show interfaces | json
- show version
waitfor:
- "result[2] contains '4.15.0F'"
- "result[1].interfaces.Management1.interfaceAddress[0].primaryIp.maskLen eq 24"
- "result[0].modelName == 'vEOS'"
"""
RETURN = """
stdout:
description: the set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: the conditionals that failed
  returned: failed
type: list
sample: ['...', '...']
"""
import time
import shlex
import re
import json
INDEX_RE = re.compile(r'(\[\d+\])')
def to_lines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
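# Sketch of the behaviour (values are illustrative): to_lines(['a\nb', {'x': 1}])
# yields ['a', 'b'] for the string entry and the dict unchanged, which is how
# the stdout_lines value documented above is derived from stdout.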
def main():
spec = dict(
commands=dict(type='list'),
waitfor=dict(type='list'),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
module = get_module(argument_spec=spec,
supports_check_mode=True)
commands = module.params['commands']
retries = module.params['retries']
interval = module.params['interval']
try:
queue = set()
for entry in (module.params['waitfor'] or list()):
queue.add(Conditional(entry))
except AttributeError, exc:
module.fail_json(msg=exc.message)
result = dict(changed=False)
while retries > 0:
response = module.execute(commands)
result['stdout'] = response
for index, cmd in enumerate(commands):
if cmd.endswith('json'):
response[index] = json.loads(response[index])
for item in list(queue):
if item(response):
queue.remove(item)
if not queue:
break
time.sleep(interval)
retries -= 1
else:
failed_conditions = [item.raw for item in queue]
module.fail_json(msg='timeout waiting for value', failed_conditions=failed_conditions)
result['stdout_lines'] = list(to_lines(result['stdout']))
return module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.shell import *
from ansible.module_utils.netcfg import *
from ansible.module_utils.eos import *
if __name__ == '__main__':
main()
| gpl-3.0 |
jimi-c/ansible | lib/ansible/modules/monitoring/monit.py | 102 | 7631 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Darryl Stoflet <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: monit
short_description: Manage the state of a program monitored via Monit
description:
- Manage the state of a program monitored via I(Monit)
version_added: "1.2"
options:
name:
description:
- The name of the I(monit) program/process to manage
required: true
state:
description:
- The state of service
required: true
choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
timeout:
description:
- If there are pending actions for the service monitored by monit, then Ansible will check
for up to this many seconds to verify the requested action has been performed.
Ansible will sleep for five seconds between each check.
default: 300
version_added: "2.1"
author: "Darryl Stoflet (@dstoflet)"
'''
EXAMPLES = '''
# Manage the state of program "httpd" to be in "started" state.
- monit:
name: httpd
state: started
'''
import time
import re
from ansible.module_utils.basic import AnsibleModule
def main():
arg_spec = dict(
name=dict(required=True),
timeout=dict(default=300, type='int'),
state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params['name']
state = module.params['state']
timeout = module.params['timeout']
MONIT = module.get_bin_path('monit', True)
def monit_version():
rc, out, err = module.run_command('%s -V' % MONIT, check_rc=True)
version_line = out.split('\n')[0]
version = re.search(r"[0-9]+\.[0-9]+", version_line).group().split('.')
        # Use only the major and minor version numbers; even if there are more, these should be enough
return int(version[0]), int(version[1])
def is_version_higher_than_5_18():
return (MONIT_MAJOR_VERSION, MONIT_MINOR_VERSION) > (5, 18)
def parse(parts):
if is_version_higher_than_5_18():
return parse_current(parts)
else:
return parse_older_versions(parts)
def parse_older_versions(parts):
if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name:
return ' '.join(parts[2:]).lower()
else:
return ''
def parse_current(parts):
if len(parts) > 2 and parts[2].lower() == 'process' and parts[0] == name:
return ''.join(parts[1]).lower()
else:
return ''
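    # Illustrative only: with monit <= 5.18 a summary line such as
    # "Process 'httpd'  Running" is reduced by parse_older_versions() to
    # "running", while newer releases print the service name first and the
    # word "Process" last, so parse_current() returns the middle column instead.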
def get_status():
"""Return the status of the process in monit, or the empty string if not present."""
rc, out, err = module.run_command('%s %s' % (MONIT, SUMMARY_COMMAND), check_rc=True)
for line in out.split('\n'):
# Sample output lines:
# Process 'name' Running
# Process 'name' Running - restart pending
parts = parse(line.split())
if parts != '':
return parts
return ''
def run_command(command):
"""Runs a monit command, and returns the new status."""
module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True)
return get_status()
def wait_for_monit_to_stop_pending():
"""Fails this run if there is no status or it's pending/initializing for timeout"""
timeout_time = time.time() + timeout
sleep_time = 5
running_status = get_status()
while running_status == '' or 'pending' in running_status or 'initializing' in running_status:
if time.time() >= timeout_time:
module.fail_json(
                    msg='waited too long for "pending" or "initializing" status to go away ({0})'.format(
running_status
),
state=state
)
time.sleep(sleep_time)
running_status = get_status()
MONIT_MAJOR_VERSION, MONIT_MINOR_VERSION = monit_version()
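    # Indexing the 2-tuple with a bool picks 'summary' for monit <= 5.18 and
    # 'summary -B' for newer releases (the -B switch is assumed here to request
    # monit's plain, batch-style output).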
SUMMARY_COMMAND = ('summary', 'summary -B')[is_version_higher_than_5_18()]
if state == 'reloaded':
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command('%s reload' % MONIT)
if rc != 0:
module.fail_json(msg='monit reload failed', stdout=out, stderr=err)
wait_for_monit_to_stop_pending()
module.exit_json(changed=True, name=name, state=state)
present = get_status() != ''
if not present and not state == 'present':
module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state)
if state == 'present':
if not present:
if module.check_mode:
module.exit_json(changed=True)
status = run_command('reload')
if status == '':
wait_for_monit_to_stop_pending()
module.exit_json(changed=True, name=name, state=state)
module.exit_json(changed=False, name=name, state=state)
wait_for_monit_to_stop_pending()
running = 'running' in get_status()
if running and state in ['started', 'monitored']:
module.exit_json(changed=False, name=name, state=state)
if running and state == 'stopped':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('stop')
if status in ['not monitored'] or 'stop pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not stopped' % name, status=status)
if running and state == 'unmonitored':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('unmonitor')
if status in ['not monitored'] or 'unmonitor pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not unmonitored' % name, status=status)
elif state == 'restarted':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('restart')
if status in ['initializing', 'running'] or 'restart pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not restarted' % name, status=status)
elif not running and state == 'started':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('start')
if status in ['initializing', 'running'] or 'start pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not started' % name, status=status)
elif not running and state == 'monitored':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('monitor')
if status not in ['not monitored']:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not monitored' % name, status=status)
module.exit_json(changed=False, name=name, state=state)
if __name__ == '__main__':
main()
| gpl-3.0 |
jeanlinux/calibre | src/calibre/gui2/tag_browser/model.py | 11 | 60438 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
from future_builtins import map
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import traceback, cPickle, copy, os
from PyQt5.Qt import (QAbstractItemModel, QIcon, QFont, Qt,
QMimeData, QModelIndex, pyqtSignal, QObject)
from calibre.constants import config_dir
from calibre.gui2 import gprefs, config, error_dialog, file_icon_provider
from calibre.db.categories import Tag
from calibre.utils.config import tweaks
from calibre.utils.icu import sort_key, lower, strcmp, collation_order
from calibre.library.field_metadata import TagsIcons, category_icon_map
from calibre.gui2.dialogs.confirm_delete import confirm
from calibre.utils.formatter import EvalFormatter
TAG_SEARCH_STATES = {'clear': 0, 'mark_plus': 1, 'mark_plusplus': 2,
'mark_minus': 3, 'mark_minusminus': 4}
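# Roughly, and subject to the per-node rules in TagTreeItem.toggle(): a node's
# search state cycles clear -> plus (match) -> plusplus (match including
# children) -> minus (exclude) -> minusminus (exclude including children).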
DRAG_IMAGE_ROLE = Qt.UserRole + 1000
_bf = None
def bf():
global _bf
if _bf is None:
_bf = QFont()
_bf.setBold(True)
_bf = (_bf)
return _bf
class TagTreeItem(object): # {{{
CATEGORY = 0
TAG = 1
ROOT = 2
def __init__(self, data=None, category_icon=None, icon_map=None,
parent=None, tooltip=None, category_key=None, temporary=False):
self.parent = parent
self.children = []
self.blank = QIcon()
self.id_set = set()
self.is_gst = False
self.boxed = False
self.icon_state_map = list(icon_map)
if self.parent is not None:
self.parent.append(self)
if data is None:
self.type = self.ROOT
else:
self.type = self.TAG if category_icon is None else self.CATEGORY
if self.type == self.CATEGORY:
self.name, self.icon = data, category_icon
self.py_name = data
self.category_key = category_key
self.temporary = temporary
self.tag = Tag(data, category=category_key,
is_editable=category_key not in
['news', 'search', 'identifiers', 'languages'],
is_searchable=category_key not in ['search'])
elif self.type == self.TAG:
self.icon_state_map[0] = data.icon
self.tag = data
self.tooltip = (tooltip + ' ') if tooltip else ''
def break_cycles(self):
del self.parent
del self.children
def __str__(self):
if self.type == self.ROOT:
return 'ROOT'
if self.type == self.CATEGORY:
return 'CATEGORY:'+str(
self.name)+':%d'%len(getattr(self,
'children', []))
return 'TAG: %s'%self.tag.name
def row(self):
if self.parent is not None:
return self.parent.children.index(self)
return 0
def append(self, child):
child.parent = self
self.children.append(child)
def data(self, role):
if role == Qt.UserRole:
return self
if self.type == self.TAG:
return self.tag_data(role)
if self.type == self.CATEGORY:
return self.category_data(role)
return None
def category_data(self, role):
if role == Qt.DisplayRole:
return (self.py_name + ' [%d]'%len(self.child_tags()))
if role == Qt.EditRole:
return (self.py_name)
if role == Qt.DecorationRole:
if self.tag.state:
return self.icon_state_map[self.tag.state]
return self.icon
if role == Qt.FontRole:
return bf()
if role == Qt.ToolTipRole and self.tooltip is not None:
return (self.tooltip)
if role == DRAG_IMAGE_ROLE:
return self.icon
return None
def tag_data(self, role):
tag = self.tag
if tag.use_sort_as_name:
name = tag.sort
tt_author = True
else:
p = self
while p.parent.type != self.ROOT:
p = p.parent
if not tag.is_hierarchical:
name = tag.original_name
else:
name = tag.name
tt_author = False
if role == Qt.DisplayRole:
count = len(self.id_set)
count = count if count > 0 else tag.count
if count == 0:
return ('%s'%(name))
else:
return ('[%d] %s'%(count, name))
if role == Qt.EditRole:
return (tag.original_name)
if role == Qt.DecorationRole:
return self.icon_state_map[tag.state]
if role == Qt.ToolTipRole:
if tt_author:
if tag.tooltip is not None:
return ('(%s) %s'%(tag.name, tag.tooltip))
else:
return (tag.name)
if tag.tooltip:
return (self.tooltip + tag.tooltip)
else:
return (self.tooltip)
if role == DRAG_IMAGE_ROLE:
return self.icon_state_map[0]
return None
def toggle(self, set_to=None):
'''
set_to: None => advance the state, otherwise a value from TAG_SEARCH_STATES
'''
if set_to is None:
while True:
self.tag.state = (self.tag.state + 1)%5
if self.tag.state == TAG_SEARCH_STATES['mark_plus'] or \
self.tag.state == TAG_SEARCH_STATES['mark_minus']:
if self.tag.is_searchable:
break
elif self.tag.state == TAG_SEARCH_STATES['mark_plusplus'] or\
self.tag.state == TAG_SEARCH_STATES['mark_minusminus']:
if self.tag.is_searchable and len(self.children) and \
self.tag.is_hierarchical == '5state':
break
else:
break
else:
self.tag.state = set_to
def all_children(self):
res = []
def recurse(nodes, res):
for t in nodes:
res.append(t)
recurse(t.children, res)
recurse(self.children, res)
return res
def child_tags(self):
res = []
def recurse(nodes, res, depth):
if depth > 100:
return
for t in nodes:
if t.type != TagTreeItem.CATEGORY:
res.append(t)
recurse(t.children, res, depth+1)
recurse(self.children, res, 1)
return res
# }}}
class TagsModel(QAbstractItemModel): # {{{
search_item_renamed = pyqtSignal()
tag_item_renamed = pyqtSignal()
refresh_required = pyqtSignal()
restriction_error = pyqtSignal()
drag_drop_finished = pyqtSignal(object)
user_categories_edited = pyqtSignal(object, object)
def __init__(self, parent):
QAbstractItemModel.__init__(self, parent)
self.node_map = {}
self.category_nodes = []
iconmap = {}
for key in category_icon_map:
iconmap[key] = QIcon(I(category_icon_map[key]))
self.category_icon_map = TagsIcons(iconmap)
self.category_custom_icons = dict()
for k, v in gprefs['tags_browser_category_icons'].iteritems():
icon = QIcon(os.path.join(config_dir, 'tb_icons', v))
if len(icon.availableSizes()) > 0:
self.category_custom_icons[k] = icon
self.categories_with_ratings = ['authors', 'series', 'publisher', 'tags']
self.icon_state_map = [None, QIcon(I('plus.png')), QIcon(I('plusplus.png')),
QIcon(I('minus.png')), QIcon(I('minusminus.png'))]
self.hidden_categories = set()
self.search_restriction = None
self.filter_categories_by = None
self.collapse_model = 'disable'
self.row_map = []
self.root_item = self.create_node(icon_map=self.icon_state_map)
self.db = None
self._build_in_progress = False
self.reread_collapse_model({}, rebuild=False)
@property
def gui_parent(self):
return QObject.parent(self)
def set_custom_category_icon(self, key, path):
d = gprefs['tags_browser_category_icons']
if path:
d[key] = path
self.category_custom_icons[key] = QIcon(os.path.join(config_dir,
'tb_icons', path))
else:
if key in d:
path = os.path.join(config_dir, 'tb_icons', d[key])
try:
os.remove(path)
except:
pass
del d[key]
del self.category_custom_icons[key]
gprefs['tags_browser_category_icons'] = d
def reread_collapse_model(self, state_map, rebuild=True):
if gprefs['tags_browser_collapse_at'] == 0:
self.collapse_model = 'disable'
else:
self.collapse_model = gprefs['tags_browser_partition_method']
if rebuild:
self.rebuild_node_tree(state_map)
def set_database(self, db):
self.beginResetModel()
hidden_cats = db.new_api.pref('tag_browser_hidden_categories', None)
# migrate from config to db prefs
if hidden_cats is None:
hidden_cats = config['tag_browser_hidden_categories']
self.hidden_categories = set()
        # strip out any non-existent field keys
for cat in hidden_cats:
if cat in db.field_metadata:
self.hidden_categories.add(cat)
db.new_api.set_pref('tag_browser_hidden_categories', list(self.hidden_categories))
self.db = db
self._run_rebuild()
self.endResetModel()
def rebuild_node_tree(self, state_map={}):
if self._build_in_progress:
print ('Tag Browser build already in progress')
traceback.print_stack()
return
# traceback.print_stack()
# print ()
self._build_in_progress = True
self.beginResetModel()
self._run_rebuild(state_map=state_map)
self.endResetModel()
self._build_in_progress = False
def _run_rebuild(self, state_map={}):
for node in self.node_map.itervalues():
node.break_cycles()
del node # Clear reference to node in the current frame
self.node_map.clear()
self.category_nodes = []
self.root_item = self.create_node(icon_map=self.icon_state_map)
self._rebuild_node_tree(state_map=state_map)
def _rebuild_node_tree(self, state_map):
# Note that _get_category_nodes can indirectly change the
# user_categories dict.
data = self._get_category_nodes(config['sort_tags_by'])
gst = self.db.prefs.get('grouped_search_terms', {})
last_category_node = None
category_node_map = {}
self.category_node_tree = {}
for i, key in enumerate(self.row_map):
if self.hidden_categories:
if key in self.hidden_categories:
continue
found = False
for cat in self.hidden_categories:
if cat.startswith('@') and key.startswith(cat + '.'):
found = True
if found:
continue
is_gst = False
if key.startswith('@') and key[1:] in gst:
tt = _(u'The grouped search term name is "{0}"').format(key)
is_gst = True
elif key == 'news':
tt = ''
else:
cust_desc = ''
fm = self.db.field_metadata[key]
if fm['is_custom']:
cust_desc = fm['display'].get('description', '')
if cust_desc:
cust_desc = '\n' + _('Description:') + ' ' + cust_desc
tt = _(u'The lookup/search name is "{0}"{1}').format(key, cust_desc)
if self.category_custom_icons.get(key, None) is None:
self.category_custom_icons[key] = (
self.category_icon_map['gst'] if is_gst else
self.category_icon_map.get(key, self.category_icon_map['custom:']))
if key.startswith('@'):
path_parts = [p for p in key.split('.')]
path = ''
last_category_node = self.root_item
tree_root = self.category_node_tree
for i,p in enumerate(path_parts):
path += p
if path not in category_node_map:
node = self.create_node(parent=last_category_node,
data=p[1:] if i == 0 else p,
category_icon=self.category_custom_icons[key],
tooltip=tt if path == key else path,
category_key=path,
icon_map=self.icon_state_map)
last_category_node = node
category_node_map[path] = node
self.category_nodes.append(node)
node.can_be_edited = (not is_gst) and (i == (len(path_parts)-1))
node.is_gst = is_gst
if not is_gst:
node.tag.is_hierarchical = '5state'
tree_root[p] = {}
tree_root = tree_root[p]
else:
last_category_node = category_node_map[path]
tree_root = tree_root[p]
path += '.'
else:
node = self.create_node(parent=self.root_item,
data=self.categories[key],
category_icon=self.category_custom_icons[key],
tooltip=tt, category_key=key,
icon_map=self.icon_state_map)
node.is_gst = False
category_node_map[key] = node
last_category_node = node
self.category_nodes.append(node)
self._create_node_tree(data, state_map)
def _create_node_tree(self, data, state_map):
sort_by = config['sort_tags_by']
eval_formatter = EvalFormatter()
if data is None:
print ('_create_node_tree: no data!')
traceback.print_stack()
return
collapse = gprefs['tags_browser_collapse_at']
collapse_model = self.collapse_model
if collapse == 0:
collapse_model = 'disable'
elif collapse_model != 'disable':
if sort_by == 'name':
collapse_template = tweaks['categories_collapsed_name_template']
elif sort_by == 'rating':
collapse_model = 'partition'
collapse_template = tweaks['categories_collapsed_rating_template']
else:
collapse_model = 'partition'
collapse_template = tweaks['categories_collapsed_popularity_template']
def get_name_components(name):
components = [t.strip() for t in name.split('.') if t.strip()]
if len(components) == 0 or '.'.join(components) != name:
components = [name]
return components
def process_one_node(category, collapse_model, state_map): # {{{
collapse_letter = None
category_node = category
key = category_node.category_key
is_gst = category_node.is_gst
if key not in data:
return
if key in gprefs['tag_browser_dont_collapse']:
collapse_model = 'disable'
cat_len = len(data[key])
if cat_len <= 0:
return
category_child_map = {}
fm = self.db.field_metadata[key]
clear_rating = True if key not in self.categories_with_ratings and \
not fm['is_custom'] and \
not fm['kind'] == 'user' \
else False
in_uc = fm['kind'] == 'user' and not is_gst
tt = key if in_uc else None
if collapse_model == 'first letter':
# Build a list of 'equal' first letters by noticing changes
# in ICU's 'ordinal' for the first letter. In this case, the
# first letter can actually be more than one letter long.
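                # For example, in some locales a digraph such as 'Ch' collates
                # as a single "letter", so a partition label here can
                # legitimately be two characters long (illustrative only).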
cl_list = [None] * len(data[key])
last_ordnum = 0
last_c = ' '
for idx,tag in enumerate(data[key]):
if not tag.sort:
c = ' '
else:
c = icu_upper(tag.sort)
ordnum, ordlen = collation_order(c)
if last_ordnum != ordnum:
last_c = c[0:ordlen]
last_ordnum = ordnum
cl_list[idx] = last_c
top_level_component = 'z' + data[key][0].original_name
last_idx = -collapse
category_is_hierarchical = not (
key in ['authors', 'publisher', 'news', 'formats', 'rating'] or
key not in self.db.prefs.get('categories_using_hierarchy', []) or
config['sort_tags_by'] != 'name')
is_formats = key == 'formats'
if is_formats:
fip = file_icon_provider().icon_from_ext
for idx,tag in enumerate(data[key]):
components = None
if clear_rating:
tag.avg_rating = None
tag.state = state_map.get((tag.name, tag.category), 0)
if collapse_model != 'disable' and cat_len > collapse:
if collapse_model == 'partition':
# Only partition at the top level. This means that we must
# not do a break until the outermost component changes.
if idx >= last_idx + collapse and \
not tag.original_name.startswith(top_level_component+'.'):
if cat_len > idx + collapse:
last = idx + collapse - 1
else:
last = cat_len - 1
if category_is_hierarchical:
ct = copy.copy(data[key][last])
components = get_name_components(ct.original_name)
ct.sort = ct.name = components[0]
d = {'last': ct}
# Do the first node after the last node so that
# the components array contains the right values
# to be used later
ct2 = copy.copy(tag)
components = get_name_components(ct2.original_name)
ct2.sort = ct2.name = components[0]
d['first'] = ct2
else:
d = {'first': tag}
d['last'] = data[key][last]
name = eval_formatter.safe_format(collapse_template,
d, '##TAG_VIEW##', None)
if name.startswith('##TAG_VIEW##'):
# Formatter threw an exception. Don't create subnode
node_parent = sub_cat = category
else:
sub_cat = self.create_node(parent=category, data=name,
tooltip=None, temporary=True,
category_icon=category_node.icon,
category_key=category_node.category_key,
icon_map=self.icon_state_map)
sub_cat.tag.is_searchable = False
sub_cat.is_gst = is_gst
node_parent = sub_cat
last_idx = idx # remember where we last partitioned
else:
node_parent = sub_cat
else: # by 'first letter'
cl = cl_list[idx]
if cl != collapse_letter:
collapse_letter = cl
sub_cat = self.create_node(parent=category,
data=collapse_letter,
category_icon=category_node.icon,
tooltip=None, temporary=True,
category_key=category_node.category_key,
icon_map=self.icon_state_map)
sub_cat.is_gst = is_gst
node_parent = sub_cat
else:
node_parent = category
# category display order is important here. The following works
# only if all the non-user categories are displayed before the
# user categories
if category_is_hierarchical or tag.is_hierarchical:
components = get_name_components(tag.original_name)
else:
components = [tag.original_name]
if (not tag.is_hierarchical) and (in_uc or
(fm['is_custom'] and fm['display'].get('is_names', False)) or
not category_is_hierarchical or len(components) == 1):
if is_formats:
try:
tag.icon = fip(tag.name.replace('ORIGINAL_', ''))
except Exception:
tag.icon = self.category_custom_icons[key]
else:
tag.icon = self.category_custom_icons[key]
n = self.create_node(parent=node_parent, data=tag, tooltip=tt,
icon_map=self.icon_state_map)
if tag.id_set is not None:
n.id_set |= tag.id_set
category_child_map[tag.name, tag.category] = n
else:
for i,comp in enumerate(components):
if i == 0:
child_map = category_child_map
top_level_component = comp
else:
child_map = dict([((t.tag.name, t.tag.category), t)
for t in node_parent.children
if t.type != TagTreeItem.CATEGORY])
if (comp,tag.category) in child_map:
node_parent = child_map[(comp,tag.category)]
node_parent.tag.is_hierarchical = \
'5state' if tag.category != 'search' else '3state'
else:
if i < len(components)-1:
t = copy.copy(tag)
t.original_name = '.'.join(components[:i+1])
t.count = 0
if key != 'search':
# This 'manufactured' intermediate node can
# be searched, but cannot be edited.
t.is_editable = False
else:
t.is_searchable = t.is_editable = False
else:
t = tag
if not in_uc:
t.original_name = t.name
t.is_hierarchical = \
'5state' if t.category != 'search' else '3state'
t.name = comp
t.icon = self.category_custom_icons[key]
node_parent = self.create_node(parent=node_parent, data=t,
tooltip=tt, icon_map=self.icon_state_map)
child_map[(comp,tag.category)] = node_parent
# This id_set must not be None
node_parent.id_set |= tag.id_set
return
# }}}
for category in self.category_nodes:
process_one_node(category, collapse_model,
state_map.get(category.category_key, {}))
def get_category_editor_data(self, category):
for cat in self.root_item.children:
if cat.category_key == category:
return [(t.tag.id, t.tag.original_name, t.tag.count)
for t in cat.child_tags() if t.tag.count > 0]
def is_in_user_category(self, index):
if not index.isValid():
return False
p = self.get_node(index)
while p.type != TagTreeItem.CATEGORY:
p = p.parent
return p.tag.category.startswith('@')
# Drag'n Drop {{{
def mimeTypes(self):
return ["application/calibre+from_library",
'application/calibre+from_tag_browser']
def mimeData(self, indexes):
data = []
for idx in indexes:
if idx.isValid():
# get some useful serializable data
node = self.get_node(idx)
path = self.path_for_index(idx)
if node.type == TagTreeItem.CATEGORY:
d = (node.type, node.py_name, node.category_key)
else:
t = node.tag
p = node
while p.type != TagTreeItem.CATEGORY:
p = p.parent
d = (node.type, p.category_key, p.is_gst, t.original_name,
t.category, path)
data.append(d)
else:
data.append(None)
raw = bytearray(cPickle.dumps(data, -1))
ans = QMimeData()
ans.setData('application/calibre+from_tag_browser', raw)
return ans
def dropMimeData(self, md, action, row, column, parent):
fmts = set([unicode(x) for x in md.formats()])
if not fmts.intersection(set(self.mimeTypes())):
return False
if "application/calibre+from_library" in fmts:
if action != Qt.CopyAction:
return False
return self.do_drop_from_library(md, action, row, column, parent)
elif 'application/calibre+from_tag_browser' in fmts:
return self.do_drop_from_tag_browser(md, action, row, column, parent)
def do_drop_from_tag_browser(self, md, action, row, column, parent):
if not parent.isValid():
return False
dest = self.get_node(parent)
if dest.type != TagTreeItem.CATEGORY:
return False
if not md.hasFormat('application/calibre+from_tag_browser'):
return False
data = str(md.data('application/calibre+from_tag_browser'))
src = cPickle.loads(data)
for s in src:
if s[0] != TagTreeItem.TAG:
return False
return self.move_or_copy_item_to_user_category(src, dest, action)
def move_or_copy_item_to_user_category(self, src, dest, action):
'''
src is a list of tuples representing items to copy. The tuple is
        (type, containing category key, whether that key is a grouped search term,
        full name, category key, path to node)
The type must be TagTreeItem.TAG
dest is the TagTreeItem node to receive the items
action is Qt.CopyAction or Qt.MoveAction
'''
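        # Illustrative only: a single dragged tag might arrive as
        #   (TagTreeItem.TAG, '@Fiction', False, 'SciFi', 'tags', [3, 0, 2])
        # i.e. the 'SciFi' tag dragged out of the user category '@Fiction';
        # the concrete values are hypothetical.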
def process_source_node(user_cats, src_parent, src_parent_is_gst,
is_uc, dest_key, idx):
'''
Copy/move an item and all its children to the destination
'''
copied = False
src_name = idx.tag.original_name
src_cat = idx.tag.category
# delete the item if the source is a user category and action is move
if is_uc and not src_parent_is_gst and src_parent in user_cats and \
action == Qt.MoveAction:
new_cat = []
for tup in user_cats[src_parent]:
if src_name == tup[0] and src_cat == tup[1]:
continue
new_cat.append(list(tup))
user_cats[src_parent] = new_cat
else:
copied = True
# Now add the item to the destination user category
add_it = True
if not is_uc and src_cat == 'news':
src_cat = 'tags'
for tup in user_cats[dest_key]:
if src_name == tup[0] and src_cat == tup[1]:
add_it = False
if add_it:
user_cats[dest_key].append([src_name, src_cat, 0])
for c in idx.children:
copied = process_source_node(user_cats, src_parent, src_parent_is_gst,
is_uc, dest_key, c)
return copied
user_cats = self.db.prefs.get('user_categories', {})
path = None
for s in src:
src_parent, src_parent_is_gst = s[1:3]
path = s[5]
if src_parent.startswith('@'):
is_uc = True
src_parent = src_parent[1:]
else:
is_uc = False
dest_key = dest.category_key[1:]
if dest_key not in user_cats:
continue
idx = self.index_for_path(path)
if idx.isValid():
process_source_node(user_cats, src_parent, src_parent_is_gst,
is_uc, dest_key,
self.get_node(idx))
self.db.new_api.set_pref('user_categories', user_cats)
self.refresh_required.emit()
return True
def do_drop_from_library(self, md, action, row, column, parent):
idx = parent
if idx.isValid():
node = self.data(idx, Qt.UserRole)
if node.type == TagTreeItem.TAG:
fm = self.db.metadata_for_field(node.tag.category)
if node.tag.category in \
('tags', 'series', 'authors', 'rating', 'publisher', 'languages') or \
(fm['is_custom'] and (
fm['datatype'] in ['text', 'rating', 'series',
'enumeration'] or (
fm['datatype'] == 'composite' and
fm['display'].get('make_category', False)))):
mime = 'application/calibre+from_library'
ids = list(map(int, str(md.data(mime)).split()))
self.handle_drop(node, ids)
return True
elif node.type == TagTreeItem.CATEGORY:
fm_dest = self.db.metadata_for_field(node.category_key)
if fm_dest['kind'] == 'user':
fm_src = self.db.metadata_for_field(md.column_name)
if md.column_name in ['authors', 'publisher', 'series'] or \
(fm_src['is_custom'] and (
(fm_src['datatype'] in ['series', 'text', 'enumeration'] and
not fm_src['is_multiple']))or
(fm_src['datatype'] == 'composite' and
fm_src['display'].get('make_category', False))):
mime = 'application/calibre+from_library'
ids = list(map(int, str(md.data(mime)).split()))
self.handle_user_category_drop(node, ids, md.column_name)
return True
return False
def handle_user_category_drop(self, on_node, ids, column):
categories = self.db.prefs.get('user_categories', {})
cat_contents = categories.get(on_node.category_key[1:], None)
if cat_contents is None:
return
cat_contents = set([(v, c) for v,c,ign in cat_contents])
fm_src = self.db.metadata_for_field(column)
label = fm_src['label']
for id in ids:
if not fm_src['is_custom']:
if label == 'authors':
value = self.db.authors(id, index_is_id=True)
value = [v.replace('|', ',') for v in value.split(',')]
elif label == 'publisher':
value = self.db.publisher(id, index_is_id=True)
elif label == 'series':
value = self.db.series(id, index_is_id=True)
else:
if fm_src['datatype'] != 'composite':
value = self.db.get_custom(id, label=label, index_is_id=True)
else:
value = self.db.get_property(id, loc=fm_src['rec_index'],
index_is_id=True)
if value:
if not isinstance(value, list):
value = [value]
cat_contents |= set([(v, column) for v in value])
categories[on_node.category_key[1:]] = [[v, c, 0] for v,c in cat_contents]
self.db.new_api.set_pref('user_categories', categories)
self.refresh_required.emit()
def handle_drop(self, on_node, ids):
# print 'Dropped ids:', ids, on_node.tag
key = on_node.tag.category
if (key == 'authors' and len(ids) >= 5):
if not confirm('<p>'+_('Changing the authors for several books can '
'take a while. Are you sure?') +
'</p>', 'tag_browser_drop_authors', self.gui_parent):
return
elif len(ids) > 15:
if not confirm('<p>'+_('Changing the metadata for that many books '
'can take a while. Are you sure?') +
'</p>', 'tag_browser_many_changes', self.gui_parent):
return
fm = self.db.metadata_for_field(key)
is_multiple = fm['is_multiple']
val = on_node.tag.original_name
for id in ids:
mi = self.db.get_metadata(id, index_is_id=True)
# Prepare to ignore the author, unless it is changed. Title is
# always ignored -- see the call to set_metadata
set_authors = False
# Author_sort cannot change explicitly. Changing the author might
# change it.
mi.author_sort = None # Never will change by itself.
if key == 'authors':
mi.authors = [val]
set_authors=True
elif fm['datatype'] == 'rating':
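                # The node name for a rating is a run of star characters;
                # calibre stores ratings on a 0-10 scale, hence two per star.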
mi.set(key, len(val) * 2)
elif fm['is_custom'] and fm['datatype'] == 'series':
mi.set(key, val, extra=1.0)
elif is_multiple:
new_val = mi.get(key, [])
if val in new_val:
# Fortunately, only one field can change, so the continue
# won't break anything
continue
new_val.append(val)
mi.set(key, new_val)
else:
mi.set(key, val)
self.db.set_metadata(id, mi, set_title=False,
set_authors=set_authors, commit=False)
self.db.commit()
self.drag_drop_finished.emit(ids)
# }}}
def get_in_vl(self):
return self.db.data.get_base_restriction() or self.db.data.get_search_restriction()
def get_book_ids_to_use(self):
if self.db.data.get_base_restriction() or self.db.data.get_search_restriction():
return self.db.search('', return_matches=True, sort_results=False)
return None
def _get_category_nodes(self, sort):
'''
Called by __init__. Do not directly call this method.
'''
self.row_map = []
self.categories = {}
# Get the categories
try:
data = self.db.new_api.get_categories(sort=sort,
icon_map=self.category_icon_map,
book_ids=self.get_book_ids_to_use(),
first_letter_sort=self.collapse_model == 'first letter')
except:
import traceback
traceback.print_exc()
data = self.db.new_api.get_categories(sort=sort, icon_map=self.category_icon_map,
first_letter_sort=self.collapse_model == 'first letter')
self.restriction_error.emit()
# Reconstruct the user categories, putting them into metadata
self.db.field_metadata.remove_dynamic_categories()
tb_cats = self.db.field_metadata
for user_cat in sorted(self.db.prefs.get('user_categories', {}).keys(),
key=sort_key):
cat_name = '@' + user_cat # add the '@' to avoid name collision
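            # A nested user category such as 'Fiction.SF' is registered below
            # as '@Fiction.SF' and then, on the next pass of the loop, as its
            # parent '@Fiction' (names are illustrative).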
while True:
try:
tb_cats.add_user_category(label=cat_name, name=user_cat)
dot = cat_name.rfind('.')
if dot < 0:
break
cat_name = cat_name[:dot]
except ValueError:
break
for cat in sorted(self.db.prefs.get('grouped_search_terms', {}).keys(),
key=sort_key):
if (u'@' + cat) in data:
try:
tb_cats.add_user_category(label=u'@' + cat, name=cat)
except ValueError:
traceback.print_exc()
self.db.new_api.refresh_search_locations()
if len(self.db.saved_search_names()):
tb_cats.add_search_category(label='search', name=_('Searches'))
if self.filter_categories_by:
for category in data.keys():
data[category] = [t for t in data[category]
if lower(t.name).find(self.filter_categories_by) >= 0]
tb_categories = self.db.field_metadata
order = tweaks['tag_browser_category_order']
defvalue = order.get('*', 100)
tb_keys = sorted(tb_categories.keys(), key=lambda x: order.get(x, defvalue))
for category in tb_keys:
if category in data: # The search category can come and go
self.row_map.append(category)
self.categories[category] = tb_categories[category]['name']
return data
def set_categories_filter(self, txt):
if txt:
self.filter_categories_by = icu_lower(txt)
else:
self.filter_categories_by = None
def get_categories_filter(self):
return self.filter_categories_by
def refresh(self, data=None):
'''
Here to trap usages of refresh in the old architecture. Can eventually
be removed.
'''
print ('TagsModel: refresh called!')
traceback.print_stack()
return False
def create_node(self, *args, **kwargs):
node = TagTreeItem(*args, **kwargs)
self.node_map[id(node)] = node
return node
def get_node(self, idx):
ans = self.node_map.get(idx.internalId(), self.root_item)
return ans
def createIndex(self, row, column, internal_pointer=None):
idx = QAbstractItemModel.createIndex(self, row, column,
id(internal_pointer))
return idx
def index_for_category(self, name):
for row, category in enumerate(self.category_nodes):
if category.category_key == name:
return self.index(row, 0, QModelIndex())
def columnCount(self, parent):
return 1
def data(self, index, role):
if not index.isValid():
return None
item = self.get_node(index)
return item.data(role)
def setData(self, index, value, role=Qt.EditRole):
if not index.isValid():
return False
# set up to reposition at the same item. We can do this except if
# working with the last item and that item is deleted, in which case
# we position at the parent label
val = unicode(value or '').strip()
if not val:
error_dialog(self.gui_parent, _('Item is blank'),
_('An item cannot be set to nothing. Delete it instead.')).exec_()
return False
item = self.get_node(index)
if item.type == TagTreeItem.CATEGORY and item.category_key.startswith('@'):
if val.find('.') >= 0:
error_dialog(self.gui_parent, _('Rename user category'),
_('You cannot use periods in the name when '
'renaming user categories'), show=True)
return False
user_cats = self.db.prefs.get('user_categories', {})
user_cat_keys_lower = [icu_lower(k) for k in user_cats]
ckey = item.category_key[1:]
ckey_lower = icu_lower(ckey)
dotpos = ckey.rfind('.')
if dotpos < 0:
nkey = val
else:
nkey = ckey[:dotpos+1] + val
nkey_lower = icu_lower(nkey)
if ckey == nkey:
return True
for c in sorted(user_cats.keys(), key=sort_key):
if icu_lower(c).startswith(ckey_lower):
if len(c) == len(ckey):
if strcmp(ckey, nkey) != 0 and \
nkey_lower in user_cat_keys_lower:
error_dialog(self.gui_parent, _('Rename user category'),
_('The name %s is already used')%nkey, show=True)
return False
user_cats[nkey] = user_cats[ckey]
del user_cats[ckey]
elif c[len(ckey)] == '.':
rest = c[len(ckey):]
if strcmp(ckey, nkey) != 0 and \
icu_lower(nkey + rest) in user_cat_keys_lower:
error_dialog(self.gui_parent, _('Rename user category'),
_('The name %s is already used')%(nkey+rest), show=True)
return False
user_cats[nkey + rest] = user_cats[ckey + rest]
del user_cats[ckey + rest]
self.user_categories_edited.emit(user_cats, nkey) # Does a refresh
return True
key = item.tag.category
name = item.tag.original_name
# make certain we know about the item's category
if key not in self.db.field_metadata:
return False
if key == 'authors':
if val.find('&') >= 0:
error_dialog(self.gui_parent, _('Invalid author name'),
_('Author names cannot contain & characters.')).exec_()
return False
if key == 'search':
if val in self.db.saved_search_names():
error_dialog(self.gui_parent, _('Duplicate search name'),
_('The saved search name %s is already used.')%val).exec_()
return False
self.db.saved_search_rename(unicode(item.data(role) or ''), val)
item.tag.name = val
self.search_item_renamed.emit() # Does a refresh
else:
            restrict_to_book_ids = self.get_book_ids_to_use() if item.use_vl else None
self.db.new_api.rename_items(key, {item.tag.id: val},
restrict_to_book_ids=restrict_to_book_ids)
self.tag_item_renamed.emit()
item.tag.name = val
item.tag.state = TAG_SEARCH_STATES['clear']
if not restrict_to_book_ids:
self.rename_item_in_all_user_categories(name, key, val)
self.refresh_required.emit()
return True
def rename_item_in_all_user_categories(self, item_name, item_category, new_name):
'''
Search all user categories for items named item_name with category
item_category and rename them to new_name. The caller must arrange to
redisplay the tree as appropriate.
'''
user_cats = self.db.prefs.get('user_categories', {})
for k in user_cats.keys():
new_contents = []
for tup in user_cats[k]:
if tup[0] == item_name and tup[1] == item_category:
new_contents.append([new_name, item_category, 0])
else:
new_contents.append(tup)
user_cats[k] = new_contents
self.db.new_api.set_pref('user_categories', user_cats)
def delete_item_from_all_user_categories(self, item_name, item_category):
'''
Search all user categories for items named item_name with category
item_category and delete them. The caller must arrange to redisplay the
tree as appropriate.
'''
user_cats = self.db.prefs.get('user_categories', {})
for cat in user_cats.keys():
self.delete_item_from_user_category(cat, item_name, item_category,
user_categories=user_cats)
self.db.new_api.set_pref('user_categories', user_cats)
def delete_item_from_user_category(self, category, item_name, item_category,
user_categories=None):
if user_categories is not None:
user_cats = user_categories
else:
user_cats = self.db.prefs.get('user_categories', {})
new_contents = []
for tup in user_cats[category]:
if tup[0] != item_name or tup[1] != item_category:
new_contents.append(tup)
user_cats[category] = new_contents
if user_categories is None:
self.db.new_api.set_pref('user_categories', user_cats)
def headerData(self, *args):
return None
def flags(self, index, *args):
ans = Qt.ItemIsEnabled|Qt.ItemIsEditable
if index.isValid():
node = self.data(index, Qt.UserRole)
if node.type == TagTreeItem.TAG:
if node.tag.is_editable:
ans |= Qt.ItemIsDragEnabled
fm = self.db.metadata_for_field(node.tag.category)
if node.tag.category in \
('tags', 'series', 'authors', 'rating', 'publisher', 'languages') or \
(fm['is_custom'] and
fm['datatype'] in ['text', 'rating', 'series', 'enumeration']):
ans |= Qt.ItemIsDropEnabled
else:
ans |= Qt.ItemIsDropEnabled
return ans
def supportedDropActions(self):
return Qt.CopyAction|Qt.MoveAction
def path_for_index(self, index):
ans = []
while index.isValid():
ans.append(index.row())
index = self.parent(index)
ans.reverse()
return ans
def index_for_path(self, path):
parent = QModelIndex()
for idx,v in enumerate(path):
tparent = self.index(v, 0, parent)
if not tparent.isValid():
if v > 0 and idx == len(path) - 1:
# Probably the last item went away. Use the one before it
tparent = self.index(v-1, 0, parent)
if not tparent.isValid():
# Not valid. Use the last valid index
break
else:
# There isn't one before it. Use the last valid index
break
parent = tparent
return parent
def index(self, row, column, parent):
if not self.hasIndex(row, column, parent):
return QModelIndex()
if not parent.isValid():
parent_item = self.root_item
else:
parent_item = self.get_node(parent)
try:
child_item = parent_item.children[row]
except IndexError:
return QModelIndex()
ans = self.createIndex(row, column, child_item)
return ans
def parent(self, index):
if not index.isValid():
return QModelIndex()
child_item = self.get_node(index)
parent_item = getattr(child_item, 'parent', None)
if parent_item is self.root_item or parent_item is None:
return QModelIndex()
ans = self.createIndex(parent_item.row(), 0, parent_item)
if not ans.isValid():
return QModelIndex()
return ans
def rowCount(self, parent):
if parent.column() > 0:
return 0
if not parent.isValid():
parent_item = self.root_item
else:
parent_item = self.get_node(parent)
return len(parent_item.children)
def reset_all_states(self, except_=None):
update_list = []
def process_tag(tag_item):
tag = tag_item.tag
if tag is except_:
tag_index = self.createIndex(tag_item.row(), 0, tag_item)
self.dataChanged.emit(tag_index, tag_index)
elif tag.state != 0 or tag in update_list:
tag_index = self.createIndex(tag_item.row(), 0, tag_item)
tag.state = 0
update_list.append(tag)
self.dataChanged.emit(tag_index, tag_index)
for t in tag_item.children:
process_tag(t)
for t in self.root_item.children:
process_tag(t)
def clear_state(self):
self.reset_all_states()
def toggle(self, index, exclusive, set_to=None):
'''
exclusive: clear all states before applying this one
set_to: None => advance the state, otherwise a value from TAG_SEARCH_STATES
'''
if not index.isValid():
return False
item = self.get_node(index)
item.toggle(set_to=set_to)
if exclusive:
self.reset_all_states(except_=item.tag)
self.dataChanged.emit(index, index)
return True
def tokens(self):
ans = []
# Tags can be in the news and the tags categories. However, because of
# the desire to use two different icons (tags and news), the nodes are
# not shared, which can lead to the possibility of searching twice for
# the same tag. The tags_seen set helps us prevent that
tags_seen = set()
# Tag nodes are in their own category and possibly in user categories.
# They will be 'checked' in both places, but we want to put the node
# into the search string only once. The nodes_seen set helps us do that
nodes_seen = set()
node_searches = {TAG_SEARCH_STATES['mark_plus'] : 'true',
TAG_SEARCH_STATES['mark_plusplus'] : '.true',
TAG_SEARCH_STATES['mark_minus'] : 'false',
TAG_SEARCH_STATES['mark_minusminus'] : '.false'}
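        # The strings assembled below follow calibre's search grammar; for
        # example a plus-state tag typically ends up as  tags:"=Fiction"  and a
        # minus-state tag as  not tags:"=Fiction"  (values are illustrative).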
for node in self.category_nodes:
if node.tag.state:
if node.category_key == "news":
if node_searches[node.tag.state] == 'true':
ans.append('tags:"=' + _('News') + '"')
else:
ans.append('( not tags:"=' + _('News') + '")')
else:
ans.append('%s:%s'%(node.category_key, node_searches[node.tag.state]))
key = node.category_key
for tag_item in node.all_children():
if tag_item.type == TagTreeItem.CATEGORY:
if self.collapse_model == 'first letter' and \
tag_item.temporary and not key.startswith('@') \
and tag_item.tag.state:
k = 'author_sort' if key == 'authors' else key
letters_seen = {}
for subnode in tag_item.children:
if subnode.tag.sort:
letters_seen[subnode.tag.sort[0]] = True
if letters_seen:
charclass = ''.join(letters_seen)
if k == 'author_sort':
expr = r'%s:"~(^[%s])|(&\s*[%s])"'%(k, charclass, charclass)
elif k == 'series':
expr = r'series_sort:"~^[%s]"'%(charclass)
else:
expr = r'%s:"~^[%s]"'%(k, charclass)
else:
expr = r'%s:false'%(k)
if node_searches[tag_item.tag.state] == 'true':
ans.append(expr)
else:
ans.append('(not ' + expr + ')')
continue
tag = tag_item.tag
if tag.state != TAG_SEARCH_STATES['clear']:
if tag.state == TAG_SEARCH_STATES['mark_minus'] or \
tag.state == TAG_SEARCH_STATES['mark_minusminus']:
prefix = ' not '
else:
prefix = ''
if node.is_gst:
category = key
else:
category = tag.category if key != 'news' else 'tag'
add_colon = False
if self.db.field_metadata[tag.category]['is_csp']:
add_colon = True
if tag.name and tag.name[0] == u'\u2605': # char is a star. Assume rating
ans.append('%s%s:%s'%(prefix, category, len(tag.name)))
else:
name = tag.original_name
use_prefix = tag.state in [TAG_SEARCH_STATES['mark_plusplus'],
TAG_SEARCH_STATES['mark_minusminus']]
if category == 'tags':
if name in tags_seen:
continue
tags_seen.add(name)
if tag in nodes_seen:
continue
nodes_seen.add(tag)
n = name.replace(r'"', r'\"')
if name.startswith('.'):
n = '.' + n
ans.append('%s%s:"=%s%s%s"'%(prefix, category,
'.' if use_prefix else '', n,
':' if add_colon else ''))
return ans
def find_item_node(self, key, txt, start_path, equals_match=False):
'''
Search for an item (a node) in the tags browser list that matches both
the key (exact case-insensitive match) and txt (not equals_match =>
case-insensitive contains match; equals_match => case_insensitive
equal match). Returns the path to the node. Note that paths are to a
        location (second item, fourth item, 25th item), not to a node. If
start_path is None, the search starts with the topmost node. If the tree
is changed subsequent to calling this method, the path can easily refer
to a different node or no node at all.
'''
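        # For instance (hypothetical tree), a match could come back as the path
        # [4, 12]: fifth top-level category, thirteenth row within it. Passing
        # that path back in as start_path resumes the search after the match.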
if not txt:
return None
txt = lower(txt) if not equals_match else txt
self.path_found = None
if start_path is None:
start_path = []
def process_tag(depth, tag_index, tag_item, start_path):
path = self.path_for_index(tag_index)
if depth < len(start_path) and path[depth] <= start_path[depth]:
return False
tag = tag_item.tag
if tag is None:
return False
name = tag.original_name
if (equals_match and strcmp(name, txt) == 0) or \
(not equals_match and lower(name).find(txt) >= 0):
self.path_found = path
return True
for i,c in enumerate(tag_item.children):
if process_tag(depth+1, self.createIndex(i, 0, c), c, start_path):
return True
return False
def process_level(depth, category_index, start_path):
path = self.path_for_index(category_index)
if depth < len(start_path):
if path[depth] < start_path[depth]:
return False
if path[depth] > start_path[depth]:
start_path = path
my_key = self.get_node(category_index).category_key
for j in xrange(self.rowCount(category_index)):
tag_index = self.index(j, 0, category_index)
tag_item = self.get_node(tag_index)
if tag_item.type == TagTreeItem.CATEGORY:
if process_level(depth+1, tag_index, start_path):
return True
elif not key or strcmp(key, my_key) == 0:
if process_tag(depth+1, tag_index, tag_item, start_path):
return True
return False
for i in xrange(self.rowCount(QModelIndex())):
if process_level(0, self.index(i, 0, QModelIndex()), start_path):
break
return self.path_found
def find_category_node(self, key, parent=QModelIndex()):
'''
        Search for a category node (a top-level node) in the tags browser list
that matches the key (exact case-insensitive match). Returns the path to
the node. Paths are as in find_item_node.
'''
if not key:
return None
for i in xrange(self.rowCount(parent)):
idx = self.index(i, 0, parent)
node = self.get_node(idx)
if node.type == TagTreeItem.CATEGORY:
ckey = node.category_key
if strcmp(ckey, key) == 0:
return self.path_for_index(idx)
if len(node.children):
v = self.find_category_node(key, idx)
if v is not None:
return v
return None
def set_boxed(self, idx):
tag_item = self.get_node(idx)
tag_item.boxed = True
self.dataChanged.emit(idx, idx)
def clear_boxed(self):
'''
Clear all boxes around items.
'''
def process_tag(tag_index, tag_item):
if tag_item.boxed:
tag_item.boxed = False
self.dataChanged.emit(tag_index, tag_index)
for i,c in enumerate(tag_item.children):
process_tag(self.index(i, 0, tag_index), c)
def process_level(category_index):
for j in xrange(self.rowCount(category_index)):
tag_index = self.index(j, 0, category_index)
tag_item = self.get_node(tag_index)
if tag_item.boxed:
tag_item.boxed = False
self.dataChanged.emit(tag_index, tag_index)
if tag_item.type == TagTreeItem.CATEGORY:
process_level(tag_index)
else:
process_tag(tag_index, tag_item)
for i in xrange(self.rowCount(QModelIndex())):
process_level(self.index(i, 0, QModelIndex()))
# }}}
| gpl-3.0 |
qgis/QGIS | python/plugins/db_manager/db_plugins/oracle/plugin.py | 29 | 22869 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS (Oracle)
Date : Aug 27, 2014
copyright : (C) 2014 by Médéric RIBREUX
email : [email protected]
The content of this file is based on
- PG_Manager by Martin Dobias <[email protected]> (GPLv2 license)
- DB Manager by Giuseppe Sucameli <[email protected]> (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import range
# this will disable the dbplugin if the connector raise an ImportError
from .connector import OracleDBConnector
from qgis.PyQt.QtCore import Qt, QCoreApplication
from qgis.PyQt.QtGui import QIcon, QKeySequence
from qgis.PyQt.QtWidgets import QAction, QApplication, QMessageBox
from qgis.core import QgsApplication, QgsVectorLayer, NULL, QgsSettings
from ..plugin import ConnectionError, InvalidDataException, DBPlugin, \
Database, Schema, Table, VectorTable, TableField, TableConstraint, \
TableIndex, TableTrigger
from qgis.core import QgsCredentials
def classFactory():
return OracleDBPlugin
class OracleDBPlugin(DBPlugin):
@classmethod
def icon(self):
return QgsApplication.getThemeIcon("/mIconOracle.svg")
@classmethod
def typeName(self):
return 'oracle'
@classmethod
def typeNameString(self):
return QCoreApplication.translate('db_manager', 'Oracle Spatial')
@classmethod
def providerName(self):
return 'oracle'
@classmethod
def connectionSettingsKey(self):
return '/Oracle/connections'
def connectToUri(self, uri):
self.db = self.databasesFactory(self, uri)
if self.db:
return True
return False
def databasesFactory(self, connection, uri):
return ORDatabase(connection, uri)
def connect(self, parent=None):
conn_name = self.connectionName()
settings = QgsSettings()
settings.beginGroup(u"/{0}/{1}".format(
self.connectionSettingsKey(), conn_name))
if not settings.contains("database"): # non-existent entry?
raise InvalidDataException(
self.tr('There is no defined database connection "{0}".'.format(
conn_name)))
from qgis.core import QgsDataSourceUri
uri = QgsDataSourceUri()
settingsList = ["host", "port", "database", "username", "password"]
host, port, database, username, password = [
settings.value(x, "", type=str) for x in settingsList]
# get all of the connection options
useEstimatedMetadata = settings.value(
"estimatedMetadata", False, type=bool)
uri.setParam('userTablesOnly', str(
settings.value("userTablesOnly", False, type=bool)))
uri.setParam('geometryColumnsOnly', str(
settings.value("geometryColumnsOnly", False, type=bool)))
uri.setParam('allowGeometrylessTables', str(
settings.value("allowGeometrylessTables", False, type=bool)))
uri.setParam('onlyExistingTypes', str(
settings.value("onlyExistingTypes", False, type=bool)))
uri.setParam('includeGeoAttributes', str(
settings.value("includeGeoAttributes", False, type=bool)))
settings.endGroup()
uri.setConnection(host, port, database, username, password)
uri.setUseEstimatedMetadata(useEstimatedMetadata)
err = u""
try:
return self.connectToUri(uri)
except ConnectionError as e:
err = str(e)
# ask for valid credentials
max_attempts = 3
for i in range(max_attempts):
(ok, username, password) = QgsCredentials.instance().get(
uri.connectionInfo(False), username, password, err)
if not ok:
return False
uri.setConnection(host, port, database, username, password)
try:
self.connectToUri(uri)
except ConnectionError as e:
if i == max_attempts - 1: # failed the last attempt
raise e
err = str(e)
continue
QgsCredentials.instance().put(
uri.connectionInfo(False), username, password)
return True
return False
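# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream QGIS file: how DB Manager is
# expected to drive the plugin class above. The connection name
# "my_oracle_conn" is hypothetical, and the DBPlugin constructor is assumed to
# take the stored connection name (real names live in QgsSettings under
# connectionSettingsKey()).
#
#     plugin = classFactory()("my_oracle_conn")
#     if plugin.connect():          # runs the QgsCredentials retry loop above
#         database = plugin.db      # ORDatabase built by databasesFactory()
# ---------------------------------------------------------------------------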
class ORDatabase(Database):
def __init__(self, connection, uri):
self.connName = connection.connectionName()
Database.__init__(self, connection, uri)
def connectorsFactory(self, uri):
return OracleDBConnector(uri, self.connName)
def dataTablesFactory(self, row, db, schema=None):
return ORTable(row, db, schema)
def vectorTablesFactory(self, row, db, schema=None):
return ORVectorTable(row, db, schema)
def info(self):
from .info_model import ORDatabaseInfo
return ORDatabaseInfo(self)
def schemasFactory(self, row, db):
return ORSchema(row, db)
def columnUniqueValuesModel(self, col, table, limit=10):
l = u""
if limit:
l = u"WHERE ROWNUM < {:d}".format(limit)
con = self.database().connector
        # Don't list unique values for geometry columns
        tableName = table.replace(u'"', u"").split(u".")
        # split() never yields an empty list; a schema-less table name comes
        # back as a single element, so pad it with a None schema instead
        if len(tableName) == 1:
            tableName = [None, tableName[0]]
colName = col.replace(u'"', u"").split(u".")[-1]
if con.isGeometryColumn(tableName, colName):
return None
query = u"SELECT DISTINCT {} FROM {} {}".format(col, table, l)
return self.sqlResultModel(query, self)
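    # Illustrative example (hypothetical identifiers, not from the upstream
    # file): with the default limit of 10,
    #     columnUniqueValuesModel('"ENAME"', '"SCOTT"."EMP"')
    # builds roughly:
    #     SELECT DISTINCT "ENAME" FROM "SCOTT"."EMP" WHERE ROWNUM < 10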
def sqlResultModel(self, sql, parent):
from .data_model import ORSqlResultModel
return ORSqlResultModel(self, sql, parent)
def sqlResultModelAsync(self, sql, parent):
from .data_model import ORSqlResultModelAsync
return ORSqlResultModelAsync(self, sql, parent)
def toSqlLayer(self, sql, geomCol, uniqueCol,
layerName=u"QueryLayer", layerType=None,
avoidSelectById=False, filter=""):
uri = self.uri()
con = self.database().connector
if uniqueCol is not None:
uniqueCol = uniqueCol.strip('"').replace('""', '"')
uri.setDataSource(u"", u"({}\n)".format(
sql), geomCol, filter, uniqueCol)
if avoidSelectById:
uri.disableSelectAtId(True)
provider = self.dbplugin().providerName()
vlayer = QgsVectorLayer(uri.uri(False), layerName, provider)
# handling undetermined geometry type
if not vlayer.isValid():
wkbType, srid = con.getTableMainGeomType(
u"({}\n)".format(sql), geomCol)
uri.setWkbType(wkbType)
if srid:
uri.setSrid(str(srid))
vlayer = QgsVectorLayer(uri.uri(False), layerName, provider)
return vlayer
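    # Illustrative usage sketch (hypothetical table/column names, not part of
    # the upstream file): loading an ad-hoc query as a QGIS layer. Adding the
    # layer to the project is assumed to go through QgsProject, which this
    # module does not import.
    #
    #     layer = db.toSqlLayer(
    #         sql='SELECT "ID", "GEOM" FROM "SCOTT"."PARCELS"',
    #         geomCol="GEOM",
    #         uniqueCol="ID",
    #         layerName="parcels_query")
    #     if layer.isValid():
    #         from qgis.core import QgsProject
    #         QgsProject.instance().addMapLayer(layer)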
def registerDatabaseActions(self, mainWindow):
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Re-connect"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Database"), self.reconnectActionSlot)
if self.schemas():
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Create Schema…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Schema"), self.createSchemaActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Delete (Empty) Schema…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Schema"), self.deleteSchemaActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "Delete Selected Item"), self)
mainWindow.registerAction(action, None, self.deleteActionSlot)
action.setShortcuts(QKeySequence.Delete)
action = QAction(QgsApplication.getThemeIcon("/mActionCreateTable.svg"),
QApplication.translate(
"DBManagerPlugin", "&Create Table…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.createTableActionSlot)
action = QAction(QgsApplication.getThemeIcon("/mActionEditTable.svg"),
QApplication.translate(
"DBManagerPlugin", "&Edit Table…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.editTableActionSlot)
action = QAction(QgsApplication.getThemeIcon("/mActionDeleteTable.svg"),
QApplication.translate(
"DBManagerPlugin", "&Delete Table/View…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.deleteTableActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Empty Table…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.emptyTableActionSlot)
def supportsComment(self):
return False
class ORSchema(Schema):
def __init__(self, row, db):
Schema.__init__(self, db)
# self.oid, self.name, self.owner, self.perms, self.comment = row
self.name = row[0]
class ORTable(Table):
def __init__(self, row, db, schema=None):
Table.__init__(self, db, schema)
self.name, self.owner, isView = row
self.estimatedRowCount = None
self.objectType = None
self.isView = False
self.isMaterializedView = False
if isView == 1:
self.isView = True
self.creationDate = None
self.modificationDate = None
def getDates(self):
"""Grab the creation/modification dates of the table"""
self.creationDate, self.modificationDate = (
self.database().connector.getTableDates((self.schemaName(),
self.name)))
def refreshRowEstimation(self):
"""Use ALL_ALL_TABLE to get an estimation of rows"""
if self.isView:
self.estimatedRowCount = 0
self.estimatedRowCount = (
self.database().connector.getTableRowEstimation(
(self.schemaName(), self.name)))
def getType(self):
"""Grab the type of object for the table"""
self.objectType = self.database().connector.getTableType(
(self.schemaName(), self.name))
def getComment(self):
"""Grab the general comment of the table/view"""
self.comment = self.database().connector.getTableComment(
(self.schemaName(), self.name), self.objectType)
def getDefinition(self):
return self.database().connector.getDefinition(
(self.schemaName(), self.name), self.objectType)
def getMViewInfo(self):
if self.objectType == u"MATERIALIZED VIEW":
return self.database().connector.getMViewInfo(
(self.schemaName(), self.name))
else:
return None
def runAction(self, action):
action = str(action)
if action.startswith("rows/"):
if action == "rows/recount":
self.refreshRowCount()
return True
elif action.startswith("index/"):
parts = action.split('/')
index_name = parts[1]
index_action = parts[2]
msg = QApplication.translate(
"DBManagerPlugin",
"Do you want to {} index {}?".format(
index_action, index_name))
QApplication.restoreOverrideCursor()
try:
if QMessageBox.question(
None,
QApplication.translate(
"DBManagerPlugin", "Table Index"),
msg,
QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
return False
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
if index_action == "rebuild":
self.aboutToChange.emit()
self.database().connector.rebuildTableIndex(
(self.schemaName(), self.name), index_name)
self.refreshIndexes()
return True
elif action.startswith(u"mview/"):
if action == "mview/refresh":
self.aboutToChange.emit()
self.database().connector.refreshMView(
(self.schemaName(), self.name))
return True
return Table.runAction(self, action)
def tableFieldsFactory(self, row, table):
return ORTableField(row, table)
def tableConstraintsFactory(self, row, table):
return ORTableConstraint(row, table)
def tableIndexesFactory(self, row, table):
return ORTableIndex(row, table)
def tableTriggersFactory(self, row, table):
return ORTableTrigger(row, table)
def info(self):
from .info_model import ORTableInfo
return ORTableInfo(self)
def tableDataModel(self, parent):
from .data_model import ORTableDataModel
return ORTableDataModel(self, parent)
def getValidQgisUniqueFields(self, onlyOne=False):
""" list of fields valid to load the table as layer in QGIS canvas.
QGIS automatically search for a valid unique field, so it's
needed only for queries and views.
"""
ret = []
# add the pk
pkcols = [x for x in self.fields() if x.primaryKey]
if len(pkcols) == 1:
ret.append(pkcols[0])
        # then add integer fields with a unique index
indexes = self.indexes()
if indexes is not None:
for idx in indexes:
if idx.isUnique and len(idx.columns) == 1:
fld = idx.fields()[idx.columns[0]]
if (fld.dataType == u"NUMBER" and not fld.modifier and fld.notNull and fld not in ret):
ret.append(fld)
# and finally append the other suitable fields
for fld in self.fields():
if (fld.dataType == u"NUMBER" and not fld.modifier and fld.notNull and fld not in ret):
ret.append(fld)
if onlyOne:
return ret[0] if len(ret) > 0 else None
return ret
def uri(self):
uri = self.database().uri()
schema = self.schemaName() if self.schemaName() else ''
geomCol = self.geomColumn if self.type in [
Table.VectorType, Table.RasterType] else ""
uniqueCol = self.getValidQgisUniqueFields(
True) if self.isView else None
uri.setDataSource(schema, self.name, geomCol if geomCol else None,
None, uniqueCol.name if uniqueCol else "")
# Handle geographic table
if geomCol:
uri.setWkbType(self.wkbType)
uri.setSrid(str(self.srid))
return uri
class ORVectorTable(ORTable, VectorTable):
def __init__(self, row, db, schema=None):
ORTable.__init__(self, row[0:3], db, schema)
VectorTable.__init__(self, db, schema)
self.geomColumn, self.geomType, self.wkbType, self.geomDim, \
self.srid = row[-7:-2]
def info(self):
from .info_model import ORVectorTableInfo
return ORVectorTableInfo(self)
def runAction(self, action):
if action.startswith("extent/"):
if action == "extent/update":
self.aboutToChange.emit()
self.updateExtent()
return True
if ORTable.runAction(self, action):
return True
return VectorTable.runAction(self, action)
def canUpdateMetadata(self):
return self.database().connector.canUpdateMetadata((self.schemaName(),
self.name))
def updateExtent(self):
self.database().connector.updateMetadata(
(self.schemaName(), self.name),
self.geomColumn, extent=self.extent)
self.refreshTableEstimatedExtent()
self.refresh()
def hasSpatialIndex(self, geom_column=None):
geom_column = geom_column if geom_column else self.geomColumn
for idx in self.indexes():
if geom_column == idx.column:
return True
return False
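# Illustrative check (hypothetical workflow, not part of the upstream file):
# a caller might warn before loading a large table that lacks a spatial index,
# e.g.
#     if not vector_table.hasSpatialIndex():
#         print("no spatial index on", vector_table.geomColumn)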
class ORTableField(TableField):
def __init__(self, row, table):
""" build fields information from query and find primary key """
TableField.__init__(self, table)
self.num, self.name, self.dataType, self.charMaxLen, \
self.modifier, self.notNull, self.hasDefault, \
self.default, typeStr, self.comment = row
self.primaryKey = False
self.num = int(self.num)
if self.charMaxLen == NULL:
self.charMaxLen = None
else:
self.charMaxLen = int(self.charMaxLen)
if self.modifier == NULL:
self.modifier = None
else:
self.modifier = int(self.modifier)
        # Oracle reports NULLABLE = 'Y' when the column accepts NULLs, so
        # invert it to get a notNull flag
        if self.notNull.upper() == u"Y":
            self.notNull = False
        else:
            self.notNull = True
if self.comment == NULL:
self.comment = u""
# find out whether fields are part of primary key
for con in self.table().constraints():
if con.type == ORTableConstraint.TypePrimaryKey and self.name == con.column:
self.primaryKey = True
break
def type2String(self):
if (u"TIMESTAMP" in self.dataType or self.dataType in [u"DATE", u"SDO_GEOMETRY", u"BINARY_FLOAT", u"BINARY_DOUBLE"]):
return u"{}".format(self.dataType)
if self.charMaxLen in [None, -1]:
return u"{}".format(self.dataType)
elif self.modifier in [None, -1, 0]:
return u"{}({})".format(self.dataType, self.charMaxLen)
return u"{}({},{})".format(self.dataType, self.charMaxLen,
self.modifier)
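    # Illustrative mappings (assumed values, not from the upstream file) of
    # how type2String() renders Oracle column metadata:
    #     dataType="DATE"                                   -> "DATE"
    #     dataType="VARCHAR2", charMaxLen=30, modifier=None -> "VARCHAR2(30)"
    #     dataType="NUMBER",   charMaxLen=10, modifier=2    -> "NUMBER(10,2)"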
def update(self, new_name, new_type_str=None, new_not_null=None,
new_default_str=None):
self.table().aboutToChange.emit()
if self.name == new_name:
new_name = None
if self.type2String() == new_type_str:
new_type_str = None
if self.notNull == new_not_null:
new_not_null = None
if self.default2String() == new_default_str:
new_default_str = None
ret = self.table().database().connector.updateTableColumn(
(self.table().schemaName(), self.table().name),
self.name, new_name, new_type_str,
new_not_null, new_default_str)
        # When changing a field, also refresh constraints and indexes.
if ret is not False:
self.table().refreshFields()
self.table().refreshConstraints()
self.table().refreshIndexes()
return ret
class ORTableConstraint(TableConstraint):
TypeCheck, TypeForeignKey, TypePrimaryKey, \
TypeUnique, TypeUnknown = list(range(5))
types = {"c": TypeCheck, "r": TypeForeignKey,
"p": TypePrimaryKey, "u": TypeUnique}
def __init__(self, row, table):
""" build constraints info from query """
TableConstraint.__init__(self, table)
self.name, constr_type_str, self.column, self.validated, \
self.generated, self.status = row[0:6]
constr_type_str = constr_type_str.lower()
if constr_type_str in ORTableConstraint.types:
self.type = ORTableConstraint.types[constr_type_str]
else:
self.type = ORTableConstraint.TypeUnknown
if row[6] == NULL:
self.checkSource = u""
else:
self.checkSource = row[6]
if row[8] == NULL:
self.foreignTable = u""
else:
self.foreignTable = row[8]
if row[7] == NULL:
self.foreignOnDelete = u""
else:
self.foreignOnDelete = row[7]
if row[9] == NULL:
self.foreignKey = u""
else:
self.foreignKey = row[9]
def type2String(self):
if self.type == ORTableConstraint.TypeCheck:
return QApplication.translate("DBManagerPlugin", "Check")
if self.type == ORTableConstraint.TypePrimaryKey:
return QApplication.translate("DBManagerPlugin", "Primary key")
if self.type == ORTableConstraint.TypeForeignKey:
return QApplication.translate("DBManagerPlugin", "Foreign key")
if self.type == ORTableConstraint.TypeUnique:
return QApplication.translate("DBManagerPlugin", "Unique")
return QApplication.translate("DBManagerPlugin", 'Unknown')
def fields(self):
""" Hack to make edit dialog box work """
fields = self.table().fields()
field = None
for fld in fields:
if fld.name == self.column:
field = fld
cols = {}
cols[0] = field
return cols
class ORTableIndex(TableIndex):
def __init__(self, row, table):
TableIndex.__init__(self, table)
self.name, self.column, self.indexType, self.status, \
self.analyzed, self.compression, self.isUnique = row
def fields(self):
""" Hack to make edit dialog box work """
self.table().refreshFields()
fields = self.table().fields()
field = None
for fld in fields:
if fld.name == self.column:
field = fld
cols = {}
cols[0] = field
return cols
class ORTableTrigger(TableTrigger):
def __init__(self, row, table):
TableTrigger.__init__(self, table)
self.name, self.event, self.type, self.enabled = row
| gpl-2.0 |