repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (class, 991 values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (class, 15 values)
---|---|---|---|---|---|
myrtleTree33/lego-mosaic | quantization.py | 1 | 3006 | import numpy as np
import cv2
import xlwt
import csv
GRID_SPACE = 7
LENGTH = 96
# def base16(n):
# a = np.base_repr(n, 16)
# if len(a) == 1:
# return "0" + a
# else:
# return a
#
# def rgbToHex(r,g,b):
# print "#" + base16(r) + base16(g) + base16(b)
#
# rgbToHex(50,255,0)
### The 48 * 48 project
img = cv2.imread('logo17.jpg')
img = cv2.resize(img, (LENGTH, LENGTH))
Z = img.reshape((-1,3))
# convert to np.float32
Z = np.float32(Z)
# define criteria, number of clusters(K) and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 7
ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
# Now convert back into uint8, and make original image
center = np.uint8(center)
res = center[label.flatten()]
res2 = res.reshape((img.shape))
res3 = res2
# res3 = cv2.resize(res2, (96,96))
palette = []
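# NOTE: build_palette() is called below but never defined in this file. The
# helper sketched here is an assumed implementation: it reads R,G,B triples
# from the LEGO palette CSV into the global `palette` list so that
# fit_palette() has colors to match against.
def build_palette(filename):
    with open(filename) as f:
        for row in csv.reader(f):
            try:
                r, g, b = int(row[0]), int(row[1]), int(row[2])
            except (ValueError, IndexError):
                continue  # skip header / comment rows
            palette.append((r, g, b))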
build_palette('Lego-colors-palette-2010.gpl.csv')
def fit_palette(pic, palette):
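    # Build a copy of pic in which every pixel is snapped to its nearest palette color (Euclidean RGB distance).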
def nearestColor(pixel, palette):
MAX_DIFF = pow(pow(255, 2) * 3, .5)
nearest_color, smallest_val = 0, MAX_DIFF
r,g,b = pixel
for p in palette:
r2, g2, b2 = p
val = pow(pow(r - r2, 2) + pow(g - g2, 2) + pow(b - b2, 2), .5)
if (smallest_val > val):
# print smallest_val
smallest_val = val
nearest_color = p
return nearest_color
h,w = len(pic[0]), len(pic)
print h, w
print len(pic[0])
output = np.zeros((w,h,3), np.uint8)
for y, row in enumerate(pic):
for x, color in enumerate(row):
# print len(output[y][x])
# print x
a = nearestColor(pic[y][x], palette)
output[y][x] = a
return output
# res4 = fit_palette(res3, palette)
# cv2.imshow('output', res4)
# cv2.waitKey(0)
def gen_xls(colors, pic, grid_size):
# Create a black image
h,w = len(pic[0]) * grid_size, len(pic) * grid_size
output = np.zeros((w,h,3), np.uint8)
for y, row in enumerate(pic):
for x, color in enumerate(row):
r,g,b = color
r = int(r)
g = int(g)
b = int(b)
cv2.rectangle(output,
(x * grid_size, y * grid_size),
((x + 1) * grid_size,(y + 1) * grid_size)
,(r,g,b),-2)
return output
# gen_xls(center, res3, 10)
res4 = fit_palette(res3, palette)
def draw_grid(pic, grid_size, grid_color):
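    # Draw horizontal and vertical grid lines every grid_size pixels directly onto pic.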
h,w = len(pic[0]), len(pic)
for y in xrange(0, h, grid_size):
cv2.line(pic, (0, y), (w, y), grid_color)
for x in xrange(0, w, grid_size):
cv2.line(pic, (x, 0), (x, h), grid_color)
res5 = gen_xls(center, res4, GRID_SPACE)
draw_grid(res5, GRID_SPACE, (200,200,200))
draw_grid(res5, GRID_SPACE * 10, (0,0,200))
cv2.imshow('output', res5)
cv2.imwrite('plan.png',res5)
cv2.waitKey(0)
# cv2.imwrite('output.png',res3)
# cv2.imshow('res3',res3)
# cv2.waitKey(0)
cv2.destroyAllWindows()
| gpl-3.0 |
damiencalloway/djtut | mysite/env/lib/python2.7/site-packages/django/template/debug.py | 110 | 3602 | from django.template.base import Lexer, Parser, tag_re, NodeList, VariableNode, TemplateSyntaxError
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.safestring import SafeData, EscapeData
from django.utils.formats import localize
from django.utils.timezone import template_localtime
class DebugLexer(Lexer):
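    # Lexer that attaches the (origin, position) source span to every token it creates.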
def __init__(self, template_string, origin):
super(DebugLexer, self).__init__(template_string, origin)
def tokenize(self):
"Return a list of tokens from a given template_string"
result, upto = [], 0
for match in tag_re.finditer(self.template_string):
start, end = match.span()
if start > upto:
result.append(self.create_token(self.template_string[upto:start], (upto, start), False))
upto = start
result.append(self.create_token(self.template_string[start:end], (start, end), True))
upto = end
last_bit = self.template_string[upto:]
if last_bit:
result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), False))
return result
def create_token(self, token_string, source, in_tag):
token = super(DebugLexer, self).create_token(token_string, in_tag)
token.source = self.origin, source
return token
class DebugParser(Parser):
def __init__(self, lexer):
super(DebugParser, self).__init__(lexer)
self.command_stack = []
def enter_command(self, command, token):
self.command_stack.append( (command, token.source) )
def exit_command(self):
self.command_stack.pop()
def error(self, token, msg):
return self.source_error(token.source, msg)
def source_error(self, source, msg):
e = TemplateSyntaxError(msg)
e.django_template_source = source
return e
def create_nodelist(self):
return DebugNodeList()
def create_variable_node(self, contents):
return DebugVariableNode(contents)
def extend_nodelist(self, nodelist, node, token):
node.source = token.source
super(DebugParser, self).extend_nodelist(nodelist, node, token)
def unclosed_block_tag(self, parse_until):
command, source = self.command_stack.pop()
msg = "Unclosed tag '%s'. Looking for one of: %s " % (command, ', '.join(parse_until))
raise self.source_error(source, msg)
def compile_function_error(self, token, e):
if not hasattr(e, 'django_template_source'):
e.django_template_source = token.source
class DebugNodeList(NodeList):
def render_node(self, node, context):
try:
return node.render(context)
except Exception as e:
if not hasattr(e, 'django_template_source'):
e.django_template_source = node.source
raise
class DebugVariableNode(VariableNode):
def render(self, context):
try:
output = self.filter_expression.resolve(context)
output = template_localtime(output, use_tz=context.use_tz)
output = localize(output, use_l10n=context.use_l10n)
output = force_text(output)
except UnicodeDecodeError:
return ''
except Exception as e:
if not hasattr(e, 'django_template_source'):
e.django_template_source = self.source
raise
if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):
return escape(output)
else:
return output
| mit |
spoonysonny/SAKS-tutorials | saks-v1.x/digital-stopwatch/entities/tact.py | 8 | 4911 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 NXEZ.COM.
# http://www.nxez.com
#
# Licensed under the GNU General Public License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.gnu.org/licenses/gpl-2.0.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Spoony'
__version__ = 'version 0.0.1'
__license__ = 'Copyright (c) 2015 NXEZ.COM'
import RPi.GPIO as GPIO
import time
from threading import Thread
class Tact(object):
'''
Tact class
'''
__pin = 0
__real_true = GPIO.HIGH
__status = False
__observers = []
def __init__(self, pin, real_true = GPIO.HIGH):
'''
Init the tact
:param pin: pin number in array
:param real_true: GPIO.HIGH or GPIO.LOW
:return: void
'''
self.__pin = pin
self.__real_true = real_true
if self.__real_true:
self.__status = GPIO.input(self.__pin)
else:
self.__status = not GPIO.input(self.__pin)
GPIO.add_event_detect(pin, GPIO.BOTH, callback = self.make_event, bouncetime = 1)
try:
t1 = Thread(target = self.watching)
t1.setDaemon(True)
except:
print "Error: Unable to start thread by Tact"
    #Status.
@property
def is_on(self):
'''
        Get current status of the tact
'''
if self.__real_true:
if self.__status != GPIO.input(self.__pin):
self.__status = GPIO.input(self.__pin)
else:
if self.__status == GPIO.input(self.__pin):
self.__status = not GPIO.input(self.__pin)
return self.__status
#Events
def register(self, observer):
if observer not in self.__observers:
self.__observers.append(observer)
def deregister(self, observer):
if observer in self.__observers:
self.__observers.remove(observer)
def notify_observers(self, status):
for o in self.__observers:
o.on_tact_event(self.__pin, status)
def event(self, action):
self.notify_observers(action)
def make_event(self, channel):
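        # GPIO edge-detect callback: notify observers of the new pin level, then re-sync the cached status.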
self.notify_observers(self.__real_true if GPIO.input(self.__pin) else not self.__real_true)
if self.__real_true:
if self.__status != GPIO.input(self.__pin):
self.__status = GPIO.input(self.__pin)
#self.notify_observers(self.__real_true if self.__status else not self.__real_true)
else:
if self.__status == GPIO.input(self.__pin):
self.__status = not GPIO.input(self.__pin)
#self.notify_observers(self.__real_true if not self.__status else not self.__real_true)
def watching(self):
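        # Polling loop (meant to run in a daemon thread): notify observers whenever the pin level changes.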
if self.__real_true:
while True:
if GPIO.input(self.__pin) != self.__status:
self.__status = GPIO.input(self.__pin)
self.notify_observers(self.__real_true if self.__status else not self.__real_true)
time.sleep(0.05)
else:
while True:
if GPIO.input(self.__pin) == self.__status:
self.__status = not GPIO.input(self.__pin)
self.notify_observers(self.__real_true if not self.__status else not self.__real_true)
time.sleep(0.05)
class TactRow(object):
'''
Class of tacts in row
'''
__tacts = []
__pins = []
__real_true = GPIO.HIGH
def __init__(self, pins, real_true = GPIO.HIGH):
'''
Init the tacts
        :param pins: pin numbers in array
:param real_true: GPIO.HIGH or GPIO.LOW
:return: void
'''
self.__pins = pins
self.__real_true = real_true
for p in pins:
self.__tacts.append(Tact(p, real_true))
    #Status.
def is_on(self, index):
'''
Get status of tact in tactrow by index
:param index: index of the tact
:return: status in boolean
'''
if index >= len(self.__tacts):
return False
return self.__tacts[index].is_on
@property
def row_status(self):
'''
Get status array of the tactrow
:return: status array
'''
r = []
for l in self.__tacts:
r.append(l.is_on)
return r
@property
def items(self):
'''
Get the instances of the tacts in tactrow
:return: instances array
'''
return self.__tacts | gpl-2.0 |
tito/pymt | examples/games/bubblebattle/bubblebattle.py | 2 | 10109 | # PYMT Plugin integration
IS_PYMT_PLUGIN = True
PLUGIN_TITLE = 'Bubble Battle !'
PLUGIN_AUTHOR = 'Mathieu Virbel'
PLUGIN_DESCRIPTION = 'Fight the bubbles !!!!! ]:X'
from pymt import *
from OpenGL.GL import *
from random import random, randint
class Basic(MTWidget):
def __init__(self, **kwargs):
super(Basic, self).__init__(**kwargs)
self.r = 30
class Enemy(Basic):
def __init__(self, **kwargs):
self.level = kwargs.get('level')
super(Enemy, self).__init__(**kwargs)
self.dx = self.level * (random() - 0.5) * 40
self.dy = self.level * 25 + random() * self.level
self.r = self.r - min(self.r - 1, random() * self.level)
def draw(self):
        # background
set_color(.75, 0, 0, .5)
drawCircle(pos=self.pos, radius=self.r)
#border
set_color(.90, 2, 2, .5)
drawCircle(pos=self.pos, radius=self.r, linewidth=3)
class Barier(Basic):
def __init__(self, **kwargs):
super(Barier, self).__init__(**kwargs)
self.lifetime = 3
self.initial_lifetime = self.lifetime
self.life = 1
self.start = True
self.label = MTLabel(font_size=20, font_bold=True, anchor_x='center',
anchor_y='center')
def draw(self):
# border
linewidth = self.r - (self.lifetime / float(self.initial_lifetime) * (self.r))
set_color(.4, .4, .75, .9)
drawCircle(pos=self.pos, radius=self.r + 3, linewidth=linewidth + 3)
# background
set_color(0, 0, .75, .7)
drawCircle(pos=self.pos, radius=self.r)
# text
self.label.label = str(int(self.life))
self.label.pos = (self.pos[0]-self.label.width/2,self.pos[1]-self.label.height/2)
self.label.draw()
def animate(self, world):
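        # While the touch is held the barrier grows and drains mana; once released it decays over its lifetime.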
dt = getFrameDt()
if self.start:
oldlife = self.life
self.life += dt * world.managrow
d = int(self.life) - int(oldlife)
if d > 0:
if world.mana - world.manacost * d < 0:
self.life = oldlife
self.stop()
else:
world.mana -= world.manacost * d
if self.life > 5:
self.stop()
self.update_radius()
else:
self.lifetime -= dt
if self.lifetime < 0:
return
return True
def update_radius(self):
self.r = self.life * 20
def stop(self):
self.start = False
self.life = int(self.life)
self.lifetime = self.life * self.lifetime
self.initial_lifetime = self.lifetime
self.update_radius()
class GameOver(MTWidget):
def __init__(self, **kwargs):
self.world = kwargs.get('world')
super(GameOver, self).__init__(**kwargs)
self.layout = MTBoxLayout(orientation='vertical', uniform_width=True,
uniform_height=True, padding=100,
spacing=20, invert_y=True)
k = {'font_size': 48}
self.text = MTLabel(label='GAME OVER', **k)
self.textlevel = MTLabel(label='Your level is %d' % self.world.level, **k)
self.textscore = MTLabel(label='Your score is %d' % self.world.highscore, **k)
self.restart = MTButton(label='Restart')
self.layout.add_widget(self.text)
self.layout.add_widget(self.textlevel)
self.layout.add_widget(self.textscore)
self.layout.add_widget(self.restart)
self.restart.push_handlers(on_press=self.handle_restart)
self.add_widget(self.layout)
def handle_restart(self, *largs):
self.world.reset()
self.parent.remove_widget(self)
def on_touch_down(self, touch):
super(GameOver, self).on_touch_down(touch)
return True
def on_touch_move(self, touch):
super(GameOver, self).on_touch_move(touch)
return True
def on_touch_up(self, touch):
super(GameOver, self).on_touch_up(touch)
return True
def draw(self):
w = self.get_parent_window()
self.layout.x = (w.width - self.layout.width) / 2.
self.layout.y = (w.height - self.layout.height) / 2.
set_color(0.2, 0.2, 0.2, .5)
drawRectangle(size=w.size)
class World(MTWidget):
def __init__(self, **kwargs):
super(World, self).__init__(**kwargs)
self.reset()
def reset(self):
self.enemies = []
self.bariers = {}
self.score = 0
self.mana = 100
self.nextspawn = 0
self.spawnspeed = 1
self.regenspeed = 5
self.managrow = 3
self.manacost = 5
self.collidescore = 20
self.collidemanafactor = .5
self.levelscore = 100
self.level = 1
self.highscore = 0
self.isgameover = False
self.alphalevel = 1
def animate(self):
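        # Per-frame world update: resolve enemy/barrier collisions, move enemies, age barriers and remove dead objects.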
w = self.get_parent_window()
dt = getFrameDt()
e_delete = []
b_delete = []
# background
self.alphalevel -= getFrameDt() * 3
# animate enemies
for e in self.enemies:
# enemy collide on barier
for bid in self.bariers:
b = self.bariers[bid]
if Vector(e.center).distance(Vector(b.center)) > e.r + b.r:
continue
# collide happen !
e_delete.append(e)
# remove one life from barier
b.life -= 1
if b.life < 1:
b_delete.append(bid)
b.update_radius()
# update score + mana
self.score += self.collidescore
self.mana += self.manacost * self.collidemanafactor
# advance enemy
e.x += e.dx * dt
e.y -= e.dy * dt
if e.x < e.r:
if e.dx < 0:
e.dx = -e.dx
e.x = e.r + e.dx * dt
elif e.x > w.width - e.r:
if e.dx > 0:
e.dx = -e.dx
e.x = w.width - e.r + e.dx * dt
# enemy fall under screen
if e.y < e.r:
e_delete.append(e)
self.score -= 100 * self.level + self.collidescore
# animate barier
for bid in self.bariers:
b = self.bariers[bid]
if not b.animate(self):
b_delete.append(bid)
# delete objects
for e in e_delete:
if e in self.enemies:
self.enemies.remove(e)
for b in b_delete:
if b in self.bariers:
del self.bariers[b]
def regen(self):
self.mana += getFrameDt() * self.regenspeed
if self.mana > 100:
self.mana = 100
def spawn(self):
self.nextspawn -= (getFrameDt() * .5) * self.spawnspeed
if self.nextspawn < 0:
self.nextspawn = 1.
w = self.get_parent_window()
x = w.width * random()
y = w.height + 20
self.enemies.append(Enemy(pos=(x, y), level=self.level))
def nextlevel(self):
if self.score > self.highscore:
self.highscore = self.score
if self.score < 0:
self.gameover()
if self.score < self.levelscore:
return
self.level += 1
self.levelscore = self.levelscore * 2
self.spawnspeed += 1
self.regenspeed += 1
self.managrow += 1
self.alphalevel = 1
self.collidescore += 2
def gameover(self):
self.stop()
if not self.isgameover:
self.isgameover = True
self.get_parent_window().add_widget(GameOver(world=self))
def stop(self):
self.spawnspeed = 0
self.regenspeed = 0
def on_touch_down(self, touch):
if self.mana - self.manacost <= 0:
return
self.mana -= self.manacost
self.bariers[touch.id] = Barier(pos=(touch.x, touch.y))
def on_touch_move(self, touch):
if not touch.id in self.bariers:
return
self.bariers[touch.id].pos = touch.x, touch.y
def on_touch_up(self, touch):
if not touch.id in self.bariers:
return
self.bariers[touch.id].stop()
def draw(self):
# game
self.spawn()
self.animate()
self.regen()
self.nextlevel()
# background
w = self.get_parent_window()
if self.alphalevel > 0:
set_color(1, .4, .4, self.alphalevel)
drawRectangle(size=w.size)
# enemies + bariers
for e in reversed(self.enemies):
e.draw()
for bid in reversed(self.bariers.keys()):
self.bariers[bid].draw()
# ui score
w2 = w.width / 2.
s = self.score / float(self.levelscore)
set_color(.5, 0, 0, .8)
drawRectangle(pos=(20, 20), size=((w2-40) * s, 30))
set_color(.8, .2, .2, .8)
drawRectangle(pos=(20, 20), size=(w2-40, 30), style=GL_LINE_LOOP)
# ui mana
w = self.get_parent_window()
set_color(.1, .1, .7, .7)
drawRectangle(pos=(w2 + 20, 20), size=((w2-40) * self.mana / 100., 30))
set_color(.4, .4, 1, .9)
drawRectangle(pos=(w2 + 20, 20), size=(w2-40, 30), style=GL_LINE_LOOP)
# score
set_color(.5, .5, .5, .5)
drawRoundedRectangle(pos=(w2/2, w.height - 35), size=(w2, 50))
set_color(.5, .5, .5, .5)
drawRoundedRectangle(pos=(w2/2, w.height - 35), size=(w2, 50), style=GL_LINE_LOOP)
label = 'Level %d ~ Score: %-5d / %5d' % (self.level, self.score, self.levelscore)
drawLabel(label=label, pos=(w2, w.height - 15), color=(255, 255, 255, 200))
def pymt_plugin_activate(w, ctx):
ctx.c = World()
w.add_widget(ctx.c)
def pymt_plugin_deactivate(w, ctx):
w.remove_widget(ctx.c)
if __name__ == '__main__':
w = MTWindow()
ctx = MTContext()
pymt_plugin_activate(w, ctx)
runTouchApp()
pymt_plugin_deactivate(w, ctx)
| lgpl-3.0 |
lintzc/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/filerep/mpp18932/test_mpp18932.py | 9 | 2376 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
from tinctest.models.scenario import ScenarioTestCase
'''
Automation for MPP-18932
'''
class Mpp18932(ScenarioTestCase):
"""
@description test cases for MPP-18932
@created 2013-03-27 10:10:10
@gucs gp_create_table_random_default_distribution=off
@modified 2013-04-22 17:10:15
@tags long_running schedule_long-running
@product_version gpdb: [4.2.5.0- main]
"""
@classmethod
def setUpClass(cls):
        tinctest.logger.info('Running test for MPP-18932')
def Dtest_scenario_setup(self):
"""
        Skipped on CI: this test is very specific to the machine it runs on, since it checks an out-of-disk (OOD) scenario
"""
test_case_list1 = []
test_case_list1.append("mpp.gpdb.tests.storage.filerep.mpp18932.setup.setup.Setup")
self.test_case_scenario.append(test_case_list1)
test_case_list2 = []
test_case_list2.append("mpp.gpdb.tests.storage.filerep.mpp18932.steps.steps.TestSteps.test_change_tracking")
self.test_case_scenario.append(test_case_list2)
test_case_list3 = []
test_case_list3.append("mpp.gpdb.tests.storage.filerep.mpp18932.steps.steps.TestSteps.test_fillDisk")
self.test_case_scenario.append(test_case_list3)
test_case_list4 = []
test_case_list4.append("mpp.gpdb.tests.storage.filerep.mpp18932.runsql.sql.RunSql")
self.test_case_scenario.append(test_case_list4)
test_case_list5 = []
test_case_list5.append("mpp.gpdb.tests.storage.filerep.mpp18932.steps.steps.TestSteps.test_checkLog_recover")
self.test_case_scenario.append(test_case_list5)
| apache-2.0 |
rsjohnco/rez | src/rez/cli/view.py | 3 | 2091 | """
View the contents of a package.
"""
def setup_parser(parser, completions=False):
formats = ("py", "yaml")
parser.add_argument(
"-f", "--format", default="yaml", choices=formats,
help="format to print the package in")
parser.add_argument(
"-a", "--all", action="store_true",
help="show all package data, including release-related fields")
parser.add_argument(
"-b", "--brief", action="store_true",
help="do not print extraneous info, such as package uri")
parser.add_argument(
"-c", "--current", action="store_true",
help="show the package in the current context, if any")
PKG_action = parser.add_argument(
"PKG", type=str,
help="the package to view")
if completions:
from rez.cli._complete_util import PackageCompleter
PKG_action.completer = PackageCompleter
def command(opts, parser, extra_arg_groups=None):
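    # Resolve the requested package, either from the current context or from the package repositories, and print it.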
from rez.utils.formatting import PackageRequest
from rez.serialise import FileFormat
from rez.packages_ import iter_packages
from rez.status import status
import sys
req = PackageRequest(opts.PKG)
if opts.current:
context = status.context
if context is None:
print >> sys.stderr, "not in a resolved environment context."
sys.exit(1)
variant = context.get_resolved_package(req.name)
if variant is None:
print >> sys.stderr, "Package %r is not in the current context" % req.name
sys.exit(1)
package = variant.parent
else:
it = iter_packages(req.name, req.range)
packages = sorted(it, key=lambda x: x.version)
if not packages:
print "no matches found"
sys.exit(1)
package = packages[-1]
if not opts.brief:
print "URI:"
print package.uri
print
print "CONTENTS:"
if opts.format == "py":
format_ = FileFormat.py
else:
format_ = FileFormat.yaml
package.print_info(format_=format_, include_release=opts.all)
| gpl-3.0 |
Elandril/SickRage | tornado/test/curl_httpclient_test.py | 39 | 4486 | from __future__ import absolute_import, division, print_function, with_statement
from hashlib import md5
from tornado.escape import utf8
from tornado.httpclient import HTTPRequest
from tornado.stack_context import ExceptionStackContext
from tornado.testing import AsyncHTTPTestCase
from tornado.test import httpclient_test
from tornado.test.util import unittest
from tornado.web import Application, RequestHandler, URLSpec
try:
import pycurl
except ImportError:
pycurl = None
if pycurl is not None:
from tornado.curl_httpclient import CurlAsyncHTTPClient
@unittest.skipIf(pycurl is None, "pycurl module not present")
class CurlHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
def get_http_client(self):
client = CurlAsyncHTTPClient(io_loop=self.io_loop,
defaults=dict(allow_ipv6=False))
# make sure AsyncHTTPClient magic doesn't give us the wrong class
self.assertTrue(isinstance(client, CurlAsyncHTTPClient))
return client
class DigestAuthHandler(RequestHandler):
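    # Minimal HTTP Digest authentication endpoint (fixed nonce) used to exercise pycurl's digest support.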
def get(self):
realm = 'test'
opaque = 'asdf'
# Real implementations would use a random nonce.
nonce = "1234"
username = 'foo'
password = 'bar'
auth_header = self.request.headers.get('Authorization', None)
if auth_header is not None:
auth_mode, params = auth_header.split(' ', 1)
assert auth_mode == 'Digest'
param_dict = {}
for pair in params.split(','):
k, v = pair.strip().split('=', 1)
if v[0] == '"' and v[-1] == '"':
v = v[1:-1]
param_dict[k] = v
assert param_dict['realm'] == realm
assert param_dict['opaque'] == opaque
assert param_dict['nonce'] == nonce
assert param_dict['username'] == username
assert param_dict['uri'] == self.request.path
h1 = md5(utf8('%s:%s:%s' % (username, realm, password))).hexdigest()
h2 = md5(utf8('%s:%s' % (self.request.method,
self.request.path))).hexdigest()
digest = md5(utf8('%s:%s:%s' % (h1, nonce, h2))).hexdigest()
if digest == param_dict['response']:
self.write('ok')
else:
self.write('fail')
else:
self.set_status(401)
self.set_header('WWW-Authenticate',
'Digest realm="%s", nonce="%s", opaque="%s"' %
(realm, nonce, opaque))
class CustomReasonHandler(RequestHandler):
def get(self):
self.set_status(200, "Custom reason")
class CustomFailReasonHandler(RequestHandler):
def get(self):
self.set_status(400, "Custom reason")
@unittest.skipIf(pycurl is None, "pycurl module not present")
class CurlHTTPClientTestCase(AsyncHTTPTestCase):
def setUp(self):
super(CurlHTTPClientTestCase, self).setUp()
self.http_client = CurlAsyncHTTPClient(self.io_loop,
defaults=dict(allow_ipv6=False))
def get_app(self):
return Application([
('/digest', DigestAuthHandler),
('/custom_reason', CustomReasonHandler),
('/custom_fail_reason', CustomFailReasonHandler),
])
def test_prepare_curl_callback_stack_context(self):
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
self.stop()
return True
with ExceptionStackContext(error_handler):
request = HTTPRequest(self.get_url('/'),
prepare_curl_callback=lambda curl: 1 / 0)
self.http_client.fetch(request, callback=self.stop)
self.wait()
self.assertEqual(1, len(exc_info))
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_digest_auth(self):
response = self.fetch('/digest', auth_mode='digest',
auth_username='foo', auth_password='bar')
self.assertEqual(response.body, b'ok')
def test_custom_reason(self):
response = self.fetch('/custom_reason')
self.assertEqual(response.reason, "Custom reason")
def test_fail_custom_reason(self):
response = self.fetch('/custom_fail_reason')
self.assertEqual(str(response.error), "HTTP 400: Custom reason")
| gpl-3.0 |
jrmanrique/ekalay-finance | finance/functions.py | 1 | 8287 | from calendar import monthrange
from datetime import date, datetime
from decimal import *
from django.contrib.auth.models import Group
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Sum
from . import forms
from .models import AccountTypes, CashInflow, CashOutflow, ChartOfAccounts
# Global Variables
num = 32
in_bank = 99510.20 # Currently not in use.
balance = 124795.70 # Starting balance from implementation (2016-10-01).
filtered = True # If True, shows Statement tab data from from_date to to_date.
test_mode = True # If True, shows test values in Summary tab.
now = datetime.now() # Defines current datetime. Do not change.
choice = now # Defines filter choice in Statement tab.
from_date = date(choice.year, choice.month, 1).strftime("%Y-%m-%d")
to_date = date(choice.year, choice.month, monthrange(choice.year, choice.month)[1]).strftime("%Y-%m-%d")
# Methods
def statement_filter(request):
""" Set filters for Statement tab. """
global from_date
global to_date
global choice
if request.method == "POST":
form = forms.StatementFilterForm(request.POST)
choice = form.data['month']
from_date = datetime.strptime(choice, "%Y-%m-%d")
to_date = datetime.strptime(choice, "%Y-%m-%d").replace(day=monthrange(from_date.year, from_date.month)[1])
else:
form = forms.StatementFilterForm()
return form, choice, from_date, to_date
def parse_date(iso_date):
""" Parse basic ISO 8601 date-only format into datetime.date format. """
if isinstance(iso_date, str):
date_obj = datetime.strptime(iso_date, "%Y-%m-%d")
else:
date_obj = iso_date
parsed_date = datetime.strftime(date_obj, "%d %B %Y")
return parsed_date
def reload_database():
""" Reloads the database after modifying models.py. """
all_outflows = CashOutflow.objects.all()
for outflow in all_outflows:
outflow.save()
all_inflows = CashInflow.objects.all()
for inflow in all_inflows:
inflow.save()
return True
def is_super(user):
""" Check user access level. """
users_in_council = Group.objects.get(name="Council").user_set.all()
users_in_finance = Group.objects.get(name="Finance").user_set.all()
if user.is_superuser:
access = True
elif user in users_in_finance:
access = False
elif user in users_in_council:
access = False
else:
access = False
return access
def is_finance(user):
""" Check user access level. """
users_in_council = Group.objects.get(name="Council").user_set.all()
users_in_finance = Group.objects.get(name="Finance").user_set.all()
if user.is_superuser:
access = True
elif user in users_in_finance:
access = True
elif user in users_in_council:
access = False
else:
access = False
return access
def is_council(user):
""" Check user access level. """
users_in_council = Group.objects.get(name="Council").user_set.all()
users_in_finance = Group.objects.get(name="Finance").user_set.all()
if user.is_superuser:
access = True
elif user in users_in_finance:
access = True
elif user in users_in_council:
access = True
else:
access = False
return access
def convert_none(value):
""" Convert Nonetype to 0. """
if value.get('amount__sum') is None:
return Decimal(0).quantize(Decimal('.01'))
else:
return Decimal(value.get('amount__sum')).quantize(Decimal('.01'))
def get_field(num, field='account_title'):
""" Get field from ref_num in models.ChartOfAccounts. """
try:
field = ChartOfAccounts.objects.values().get(ref_num=num).get(field)
except ObjectDoesNotExist:
field = None
return field
def get_type(num):
""" Get account_type from ref_num in models.ChartOfAccounts. """
try:
field_id = ChartOfAccounts.objects.values().get(ref_num=num).get('account_type_id')
field = AccountTypes.objects.values().get(id=field_id).get('account_type')
except ObjectDoesNotExist:
field = None
return field
def get_balance():
""" Get previous balance for filtering. """
prev_inflow = CashInflow.objects.filter(date__lt=from_date).aggregate(Sum('amount'))
prev_outflow = CashOutflow.objects.filter(date__lt=from_date).aggregate(Sum('amount'))
if filtered:
filter_bal = Decimal(balance).quantize(Decimal('.01')) + convert_none(prev_inflow) - convert_none(prev_outflow)
else:
filter_bal = Decimal(balance).quantize(Decimal('.01'))
return filter_bal
def sum_flow(model):
""" Get sum of amount in model. """
if filtered:
sum = model.objects.exclude(date__gt=to_date).filter(date__gte=from_date).aggregate(Sum('amount'))
else:
sum = model.objects.all().aggregate(Sum('amount'))
return convert_none(sum)
def sum_type(model, account_type):
""" Get sum of amount of flow_type in model. """
if filtered:
flow = model.objects.exclude(date__gt=to_date).filter(date__gte=from_date, flow_type=account_type).aggregate(Sum('amount'))
else:
flow = model.objects.filter(flow_type=account_type).aggregate(Sum('amount'))
return convert_none(flow)
def sum_type_net(account_type):
""" Get net sum of amount of flow_type. """
net_sum = sum_type(CashInflow, account_type) - sum_type(CashOutflow, account_type)
return net_sum
def sum_refnum(model, num):
""" Get sum of amount of ref_num in model. """
if filtered:
flow = model.objects.exclude(date__gt=to_date).filter(date__gte=from_date, ref_num=num).aggregate(Sum('amount'))
else:
flow = model.objects.filter(ref_num=num).aggregate(Sum('amount'))
return convert_none(flow)
def sum_refnum_net(num):
""" Get net sum of amount of ref_num. """
sum = sum_refnum(CashInflow, num)-sum_refnum(CashOutflow, num)
return sum
def list_accounts():
""" List accounts with respective balances. """
account_list = []
for account in ChartOfAccounts.objects.order_by('ref_num'):
account_details = {}
account_details['num'] = account.ref_num
account_details['title'] = account.account_title
account_details['type'] = str(account.account_type)
account_details['net'] = sum_refnum_net(account.ref_num)
account_details['inflow'] = sum_refnum(CashInflow, account.ref_num)
account_details['outflow'] = sum_refnum(CashOutflow, account.ref_num)
account_list.append(account_details)
return account_list
def list_types():
""" List account_types with respective balances. """
type_list = []
for account_type in AccountTypes.objects.all():
type_details = {}
type_details['type'] = account_type.account_type
type_details['net'] = sum_type_net(account_type)
type_details['inflow'] = sum_type(CashInflow, account_type)
type_details['outflow'] = sum_type(CashOutflow, account_type)
type_list.append(type_details)
return type_list
def list_months():
""" List months with cash flows for filtering. """
month_list = []
for month in CashInflow.objects.dates('date', 'month'):
month_item = []
month_item.append(month)
month_item.append(month.strftime("%B %Y"))
month_item_tuple = tuple(month_item)
month_list.append(month_item_tuple)
for month in CashOutflow.objects.dates('date', 'month'):
month_item = []
month_item.append(month)
month_item.append(month.strftime("%B %Y"))
month_item_tuple = tuple(month_item)
month_list.append(month_item_tuple)
seq = list(set(month_list))
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def test_function():
""" Test function. """
prev_inflow = CashInflow.objects.filter(date__lte=from_date).aggregate(Sum('amount'))
prev_outflow = CashOutflow.objects.filter(date__lte=from_date).aggregate(Sum('amount'))
if filtered:
filter_bal = Decimal(balance).quantize(Decimal('.01')) + convert_none(prev_inflow) - convert_none(prev_outflow)
else:
filter_bal = Decimal(balance).quantize(Decimal('.01'))
return filter_bal
| mit |
aselle/tensorflow | tensorflow/contrib/estimator/python/estimator/early_stopping_test.py | 10 | 8408 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for early_stopping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl.testing import parameterized
from tensorflow.contrib.estimator.python.estimator import early_stopping
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import run_config
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
from tensorflow.python.training import training_util
class _FakeRunConfig(run_config.RunConfig):
def __init__(self, is_chief):
super(_FakeRunConfig, self).__init__()
self._is_chief = is_chief
@property
def is_chief(self):
return self._is_chief
def _dummy_model_fn(features, labels, params):
_, _, _ = features, labels, params
class _FakeEstimator(estimator.Estimator):
"""Fake estimator for testing."""
def __init__(self, config):
super(_FakeEstimator, self).__init__(
model_fn=_dummy_model_fn, config=config)
def _write_events(eval_dir, params):
"""Test helper to write events to summary files."""
for steps, loss, accuracy in params:
estimator._write_dict_to_summary(eval_dir, {
'loss': loss,
'accuracy': accuracy,
}, steps)
class ReadEvalMetricsTest(test.TestCase):
def test_read_eval_metrics(self):
eval_dir = tempfile.mkdtemp()
_write_events(
eval_dir,
[
# steps, loss, accuracy
(1000, 1, 2),
(2000, 3, 4),
(3000, 5, 6),
])
self.assertEqual({
1000: {
'loss': 1,
'accuracy': 2
},
2000: {
'loss': 3,
'accuracy': 4
},
3000: {
'loss': 5,
'accuracy': 6
},
}, early_stopping.read_eval_metrics(eval_dir))
def test_read_eval_metrics_when_no_events(self):
eval_dir = tempfile.mkdtemp()
self.assertTrue(os.path.exists(eval_dir))
# No error should be raised when eval directory exists with no event files.
self.assertEqual({}, early_stopping.read_eval_metrics(eval_dir))
os.rmdir(eval_dir)
self.assertFalse(os.path.exists(eval_dir))
# No error should be raised when eval directory does not exist.
self.assertEqual({}, early_stopping.read_eval_metrics(eval_dir))
class EarlyStoppingHooksTest(test.TestCase, parameterized.TestCase):
def setUp(self):
config = _FakeRunConfig(is_chief=True)
self._estimator = _FakeEstimator(config=config)
eval_dir = self._estimator.eval_dir()
os.makedirs(eval_dir)
_write_events(
eval_dir,
[
# steps, loss, accuracy
(1000, 0.8, 0.5),
(2000, 0.7, 0.6),
(3000, 0.4, 0.7),
(3500, 0.41, 0.68),
])
def run_session(self, hooks, should_stop):
hooks = hooks if isinstance(hooks, list) else [hooks]
with ops.Graph().as_default():
training_util.create_global_step()
no_op = control_flow_ops.no_op()
with monitored_session.SingularMonitoredSession(hooks=hooks) as mon_sess:
mon_sess.run(no_op)
self.assertEqual(mon_sess.should_stop(), should_stop)
@parameterized.parameters((0.8, 0, False), (0.6, 4000, False), (0.6, 0, True))
def test_stop_if_higher_hook(self, threshold, min_steps, should_stop):
self.run_session(
early_stopping.stop_if_higher_hook(
self._estimator,
metric_name='accuracy',
threshold=threshold,
min_steps=min_steps), should_stop)
@parameterized.parameters((0.3, 0, False), (0.5, 4000, False), (0.5, 0, True))
def test_stop_if_lower_hook(self, threshold, min_steps, should_stop):
self.run_session(
early_stopping.stop_if_lower_hook(
self._estimator,
metric_name='loss',
threshold=threshold,
min_steps=min_steps), should_stop)
@parameterized.parameters((1500, 0, False), (500, 4000, False),
(500, 0, True))
def test_stop_if_no_increase_hook(self, max_steps, min_steps, should_stop):
self.run_session(
early_stopping.stop_if_no_increase_hook(
self._estimator,
metric_name='accuracy',
max_steps_without_increase=max_steps,
min_steps=min_steps), should_stop)
@parameterized.parameters((1500, 0, False), (500, 4000, False),
(500, 0, True))
def test_stop_if_no_decrease_hook(self, max_steps, min_steps, should_stop):
self.run_session(
early_stopping.stop_if_no_decrease_hook(
self._estimator,
metric_name='loss',
max_steps_without_decrease=max_steps,
min_steps=min_steps), should_stop)
@parameterized.parameters((1500, 0.3, False), (1500, 0.5, True),
(500, 0.3, True))
def test_multiple_hooks(self, max_steps, loss_threshold, should_stop):
self.run_session([
early_stopping.stop_if_no_decrease_hook(
self._estimator,
metric_name='loss',
max_steps_without_decrease=max_steps),
early_stopping.stop_if_lower_hook(
self._estimator, metric_name='loss', threshold=loss_threshold)
], should_stop)
@parameterized.parameters(False, True)
def test_make_early_stopping_hook(self, should_stop):
self.run_session([
early_stopping.make_early_stopping_hook(
self._estimator, should_stop_fn=lambda: should_stop)
], should_stop)
def test_make_early_stopping_hook_typeerror(self):
with self.assertRaises(TypeError):
early_stopping.make_early_stopping_hook(
estimator=object(), should_stop_fn=lambda: True)
def test_make_early_stopping_hook_valueerror(self):
with self.assertRaises(ValueError):
early_stopping.make_early_stopping_hook(
self._estimator,
should_stop_fn=lambda: True,
run_every_secs=60,
run_every_steps=100)
class StopOnPredicateHookTest(test.TestCase):
def test_stop(self):
hook = early_stopping._StopOnPredicateHook(
should_stop_fn=lambda: False, run_every_secs=0)
with ops.Graph().as_default():
training_util.create_global_step()
no_op = control_flow_ops.no_op()
with monitored_session.SingularMonitoredSession(hooks=[hook]) as mon_sess:
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
self.assertFalse(mon_sess.raw_session().run(hook._stop_var))
hook = early_stopping._StopOnPredicateHook(
should_stop_fn=lambda: True, run_every_secs=0)
with ops.Graph().as_default():
training_util.create_global_step()
no_op = control_flow_ops.no_op()
with monitored_session.SingularMonitoredSession(hooks=[hook]) as mon_sess:
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
self.assertTrue(mon_sess.raw_session().run(hook._stop_var))
class CheckForStoppingHookTest(test.TestCase):
def test_stop(self):
hook = early_stopping._CheckForStoppingHook()
with ops.Graph().as_default():
no_op = control_flow_ops.no_op()
assign_op = state_ops.assign(early_stopping._get_or_create_stop_var(),
True)
with monitored_session.SingularMonitoredSession(hooks=[hook]) as mon_sess:
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
mon_sess.run(assign_op)
self.assertTrue(mon_sess.should_stop())
if __name__ == '__main__':
test.main()
| apache-2.0 |
feigames/Odoo | addons/knowledge/__init__.py | 436 | 1064 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Tutakamimearitomomei/Kongcoin | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
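# Stitch the rendered frames into a single animated MNG using ImageMagick's convert.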
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
ibressler/pyqtgraph | pyqtgraph/WidgetGroup.py | 14 | 10580 | # -*- coding: utf-8 -*-
"""
WidgetGroup.py - WidgetGroup class for easily managing lots of Qt widgets
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
This class addresses the problem of having to save and restore the state
of a large group of widgets.
"""
from .Qt import QtCore, QtGui
import weakref, inspect
from .python2_3 import asUnicode
__all__ = ['WidgetGroup']
def splitterState(w):
s = str(w.saveState().toPercentEncoding())
return s
def restoreSplitter(w, s):
if type(s) is list:
w.setSizes(s)
elif type(s) is str:
w.restoreState(QtCore.QByteArray.fromPercentEncoding(s))
else:
print("Can't configure QSplitter using object of type", type(s))
if w.count() > 0: ## make sure at least one item is not collapsed
for i in w.sizes():
if i > 0:
return
w.setSizes([50] * w.count())
def comboState(w):
ind = w.currentIndex()
data = w.itemData(ind)
#if not data.isValid():
if data is not None:
try:
if not data.isValid():
data = None
else:
data = data.toInt()[0]
except AttributeError:
pass
if data is None:
return asUnicode(w.itemText(ind))
else:
return data
def setComboState(w, v):
if type(v) is int:
#ind = w.findData(QtCore.QVariant(v))
ind = w.findData(v)
if ind > -1:
w.setCurrentIndex(ind)
return
w.setCurrentIndex(w.findText(str(v)))
class WidgetGroup(QtCore.QObject):
"""This class takes a list of widgets and keeps an internal record of their state which is always up to date. Allows reading and writing from groups of widgets simultaneously."""
## List of widget types which can be handled by WidgetGroup.
## The value for each type is a tuple (change signal function, get function, set function, [auto-add children])
## The change signal function that takes an object and returns a signal that is emitted any time the state of the widget changes, not just
## when it is changed by user interaction. (for example, 'clicked' is not a valid signal here)
## If the change signal is None, the value of the widget is not cached.
## Custom widgets not in this list can be made to work with WidgetGroup by giving them a 'widgetGroupInterface' method
## which returns the tuple.
classes = {
QtGui.QSpinBox:
(lambda w: w.valueChanged,
QtGui.QSpinBox.value,
QtGui.QSpinBox.setValue),
QtGui.QDoubleSpinBox:
(lambda w: w.valueChanged,
QtGui.QDoubleSpinBox.value,
QtGui.QDoubleSpinBox.setValue),
QtGui.QSplitter:
(None,
splitterState,
restoreSplitter,
True),
QtGui.QCheckBox:
(lambda w: w.stateChanged,
QtGui.QCheckBox.isChecked,
QtGui.QCheckBox.setChecked),
QtGui.QComboBox:
(lambda w: w.currentIndexChanged,
comboState,
setComboState),
QtGui.QGroupBox:
(lambda w: w.toggled,
QtGui.QGroupBox.isChecked,
QtGui.QGroupBox.setChecked,
True),
QtGui.QLineEdit:
(lambda w: w.editingFinished,
lambda w: str(w.text()),
QtGui.QLineEdit.setText),
QtGui.QRadioButton:
(lambda w: w.toggled,
QtGui.QRadioButton.isChecked,
QtGui.QRadioButton.setChecked),
QtGui.QSlider:
(lambda w: w.valueChanged,
QtGui.QSlider.value,
QtGui.QSlider.setValue),
}
sigChanged = QtCore.Signal(str, object)
def __init__(self, widgetList=None):
"""Initialize WidgetGroup, adding specified widgets into this group.
widgetList can be:
- a list of widget specifications (widget, [name], [scale])
- a dict of name: widget pairs
- any QObject, and all compatible child widgets will be added recursively.
The 'scale' parameter for each widget allows QSpinBox to display a different value than the value recorded
in the group state (for example, the program may set a spin box value to 100e-6 and have it displayed as 100 to the user)
"""
QtCore.QObject.__init__(self)
self.widgetList = weakref.WeakKeyDictionary() # Make sure widgets don't stick around just because they are listed here
self.scales = weakref.WeakKeyDictionary()
self.cache = {} ## name:value pairs
self.uncachedWidgets = weakref.WeakKeyDictionary()
if isinstance(widgetList, QtCore.QObject):
self.autoAdd(widgetList)
elif isinstance(widgetList, list):
for w in widgetList:
self.addWidget(*w)
elif isinstance(widgetList, dict):
for name, w in widgetList.items():
self.addWidget(w, name)
elif widgetList is None:
return
else:
raise Exception("Wrong argument type %s" % type(widgetList))
def addWidget(self, w, name=None, scale=None):
if not self.acceptsType(w):
raise Exception("Widget type %s not supported by WidgetGroup" % type(w))
if name is None:
name = str(w.objectName())
if name == '':
raise Exception("Cannot add widget '%s' without a name." % str(w))
self.widgetList[w] = name
self.scales[w] = scale
self.readWidget(w)
if type(w) in WidgetGroup.classes:
signal = WidgetGroup.classes[type(w)][0]
else:
signal = w.widgetGroupInterface()[0]
if signal is not None:
if inspect.isfunction(signal) or inspect.ismethod(signal):
signal = signal(w)
signal.connect(self.mkChangeCallback(w))
else:
self.uncachedWidgets[w] = None
def findWidget(self, name):
for w in self.widgetList:
if self.widgetList[w] == name:
return w
return None
def interface(self, obj):
t = type(obj)
if t in WidgetGroup.classes:
return WidgetGroup.classes[t]
else:
return obj.widgetGroupInterface()
def checkForChildren(self, obj):
"""Return true if we should automatically search the children of this object for more."""
iface = self.interface(obj)
return (len(iface) > 3 and iface[3])
def autoAdd(self, obj):
## Find all children of this object and add them if possible.
accepted = self.acceptsType(obj)
if accepted:
#print "%s auto add %s" % (self.objectName(), obj.objectName())
self.addWidget(obj)
if not accepted or self.checkForChildren(obj):
for c in obj.children():
self.autoAdd(c)
def acceptsType(self, obj):
for c in WidgetGroup.classes:
if isinstance(obj, c):
return True
if hasattr(obj, 'widgetGroupInterface'):
return True
return False
#return (type(obj) in WidgetGroup.classes)
def setScale(self, widget, scale):
val = self.readWidget(widget)
self.scales[widget] = scale
self.setWidget(widget, val)
#print "scaling %f to %f" % (val, self.readWidget(widget))
def mkChangeCallback(self, w):
return lambda *args: self.widgetChanged(w, *args)
def widgetChanged(self, w, *args):
#print "widget changed"
n = self.widgetList[w]
v1 = self.cache[n]
v2 = self.readWidget(w)
if v1 != v2:
#print "widget", n, " = ", v2
self.emit(QtCore.SIGNAL('changed'), self.widgetList[w], v2)
self.sigChanged.emit(self.widgetList[w], v2)
def state(self):
for w in self.uncachedWidgets:
self.readWidget(w)
#cc = self.cache.copy()
#if 'averageGroup' in cc:
#val = cc['averageGroup']
#w = self.findWidget('averageGroup')
#self.readWidget(w)
#if val != self.cache['averageGroup']:
#print " AverageGroup did not match cached value!"
#else:
#print " AverageGroup OK"
return self.cache.copy()
def setState(self, s):
#print "SET STATE", self, s
for w in self.widgetList:
n = self.widgetList[w]
#print " restore %s?" % n
if n not in s:
continue
#print " restore state", w, n, s[n]
self.setWidget(w, s[n])
def readWidget(self, w):
if type(w) in WidgetGroup.classes:
getFunc = WidgetGroup.classes[type(w)][1]
else:
getFunc = w.widgetGroupInterface()[1]
if getFunc is None:
return None
## if the getter function provided in the interface is a bound method,
## then just call the method directly. Otherwise, pass in the widget as the first arg
## to the function.
if inspect.ismethod(getFunc) and getFunc.__self__ is not None:
val = getFunc()
else:
val = getFunc(w)
if self.scales[w] is not None:
val /= self.scales[w]
#if isinstance(val, QtCore.QString):
#val = str(val)
n = self.widgetList[w]
self.cache[n] = val
return val
def setWidget(self, w, v):
v1 = v
if self.scales[w] is not None:
v *= self.scales[w]
if type(w) in WidgetGroup.classes:
setFunc = WidgetGroup.classes[type(w)][2]
else:
setFunc = w.widgetGroupInterface()[2]
## if the setter function provided in the interface is a bound method,
## then just call the method directly. Otherwise, pass in the widget as the first arg
## to the function.
if inspect.ismethod(setFunc) and setFunc.__self__ is not None:
setFunc(v)
else:
setFunc(w, v)
#name = self.widgetList[w]
#if name in self.cache and (self.cache[name] != v1):
#print "%s: Cached value %s != set value %s" % (name, str(self.cache[name]), str(v1))
| mit |
epssy/hue | desktop/core/ext-py/django-openid-auth-0.5/example_consumer/settings.py | 42 | 5475 | # django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2007 Simon Willison
# Copyright (C) 2008-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Django settings for example project.
import django
django_version = django.get_version()
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
if django_version >= "1.2":
csrf_middleware = 'django.middleware.csrf.CsrfViewMiddleware'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'sqlite.db'
}
}
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
else:
csrf_middleware = 'django.contrib.csrf.middleware.CsrfViewMiddleware'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = 'sqlite.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '34958734985734985734985798437'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
csrf_middleware,
)
ROOT_URLCONF = 'example_consumer.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django_openid_auth',
)
AUTHENTICATION_BACKENDS = (
'django_openid_auth.auth.OpenIDBackend',
'django.contrib.auth.backends.ModelBackend',
)
# Should users be created when new OpenIDs are used to log in?
OPENID_CREATE_USERS = True
# When logging in again, should we overwrite user details based on
# data received via Simple Registration?
OPENID_UPDATE_DETAILS_FROM_SREG = True
# If set, always use this as the identity URL rather than asking the
# user. This only makes sense if it is a server URL.
OPENID_SSO_SERVER_URL = 'https://login.launchpad.net/'
# Tell django.contrib.auth to use the OpenID signin URLs.
LOGIN_URL = '/openid/login/'
LOGIN_REDIRECT_URL = '/'
# Should django_auth_openid be used to sign into the admin interface?
OPENID_USE_AS_ADMIN_LOGIN = False
| apache-2.0 |
rghe/ansible | lib/ansible/modules/monitoring/grafana_plugin.py | 27 | 7426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Thierry Sallé (@seuf)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'
}
DOCUMENTATION = '''
---
module: grafana_plugin
author:
- Thierry Sallé (@tsalle)
version_added: "2.5"
short_description: Manage Grafana plugins via grafana-cli
description:
- Install and remove Grafana plugins.
options:
name:
description:
- Name of the plugin.
required: true
version:
description:
- Version of the plugin to install.
- Default to latest.
grafana_plugins_dir:
description:
- Directory where Grafana plugin will be installed.
grafana_repo:
description:
- Grafana repository. If not set, gafana-cli will use the default value C(https://grafana.net/api/plugins).
grafana_plugin_url:
description:
- Custom Grafana plugin URL.
- Requires grafana 4.6.x or later.
state:
description:
- Status of the Grafana plugin.
- If latest is set, the version parameter will be ignored.
choices: [ absent, present ]
default: present
'''
EXAMPLES = '''
---
- name: Install - update Grafana piechart panel plugin
grafana_plugin:
name: grafana-piechart-panel
version: latest
state: present
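# Removal sketch (illustrative): relies on the documented state option.
- name: Remove Grafana piechart panel plugin
  grafana_plugin:
    name: grafana-piechart-panel
    state: absent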
'''
RETURN = '''
---
version:
description: version of the installed / removed plugin.
type: string
  returned: always
'''
import base64
import json
import os
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
class GrafanaCliException(Exception):
pass
def grafana_cli_bin(params):
'''
Get the grafana-cli binary path with global options.
Raise a GrafanaCliException if the grafana-cli is not present or not in PATH
:param params: ansible module params. Used to fill grafana-cli global params.
'''
program = 'grafana-cli'
grafana_cli = None
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
grafana_cli = program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
grafana_cli = exe_file
break
if grafana_cli is None:
raise GrafanaCliException('grafana-cli binary is not present or not in PATH')
else:
if 'grafana_plugin_url' in params and params['grafana_plugin_url']:
grafana_cli = '{} {} {}'.format(grafana_cli, '--pluginUrl', params['grafana_plugin_url'])
if 'grafana_plugins_dir' in params and params['grafana_plugins_dir']:
grafana_cli = '{} {} {}'.format(grafana_cli, '--pluginsDir', params['grafana_plugins_dir'])
if 'grafana_repo' in params and params['grafana_repo']:
grafana_cli = '{} {} {}'.format(grafana_cli, '--repo', params['grafana_repo'])
if 'validate_certs' in params and params['validate_certs'] is False:
grafana_cli = '{} {}'.format(grafana_cli, '--insecure')
return '{} {}'.format(grafana_cli, 'plugins')
def get_grafana_plugin_version(module, params):
'''
Fetch grafana installed plugin version. Return None if plugin is not installed.
:param module: ansible module object. used to run system commands.
:param params: ansible module params.
'''
grafana_cli = grafana_cli_bin(params)
rc, stdout, stderr = module.run_command('{} ls'.format(grafana_cli))
stdout_lines = stdout.split("\n")
for line in stdout_lines:
if line.find(' @ ') != -1:
line = line.rstrip()
plugin_name, plugin_version = line.split(' @ ')
if plugin_name == params['name']:
return plugin_version
return None
def grafana_plugin(module, params):
'''
Install update or remove grafana plugin
:param module: ansible module object. used to run system commands.
:param params: ansible module params.
'''
grafana_cli = grafana_cli_bin(params)
if params['state'] == 'present':
grafana_plugin_version = get_grafana_plugin_version(module, params)
if grafana_plugin_version is not None:
if 'version' in params and params['version']:
if params['version'] == grafana_plugin_version:
return {'msg': 'Grafana plugin already installed',
'changed': False,
'version': grafana_plugin_version}
else:
if params['version'] == 'latest' or params['version'] is None:
cmd = '{} update {}'.format(grafana_cli, params['name'])
else:
cmd = '{} install {} {}'.format(grafana_cli, params['name'], params['version'])
else:
return {'msg': 'Grafana plugin already installed',
'changed': False,
'version': grafana_plugin_version}
else:
if 'version' in params:
if params['version'] == 'latest' or params['version'] is None:
cmd = '{} install {}'.format(grafana_cli, params['name'])
else:
cmd = '{} install {} {}'.format(grafana_cli, params['name'], params['version'])
else:
cmd = '{} install {}'.format(grafana_cli, params['name'])
else:
cmd = '{} uninstall {}'.format(grafana_cli, params['name'])
rc, stdout, stderr = module.run_command(cmd)
if rc == 0:
stdout_lines = stdout.split("\n")
for line in stdout_lines:
            if line.find(params['name']) != -1:
if line.find(' @ ') != -1:
line = line.rstrip()
plugin_name, plugin_version = line.split(' @ ')
else:
plugin_version = None
return {'msg': 'Grafana plugin {} installed : {}'.format(params['name'], cmd),
'changed': True,
'version': plugin_version}
else:
raise GrafanaCliException("'{}' execution returned an error : [{}] {} {}".format(cmd, rc, stdout, stderr))
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True,
type='str'),
version=dict(type='str'),
grafana_plugins_dir=dict(type='str'),
grafana_repo=dict(type='str'),
grafana_plugin_url=dict(type='str'),
state=dict(choices=['present', 'absent'],
default='present')
),
supports_check_mode=False
)
try:
result = grafana_plugin(module, module.params)
except GrafanaCliException as e:
module.fail_json(
failed=True,
msg="{}".format(e)
)
return
except Exception as e:
module.fail_json(
failed=True,
msg="{} : {} ".format(type(e), e)
)
return
module.exit_json(
failed=False,
**result
)
return
if __name__ == '__main__':
main()
| gpl-3.0 |
HIIT/mediacollection | test/test_seura.py | 1 | 1036 | import sys
import os
import filecmp
import importlib
import datetime
import common
path = os.path.abspath('.')
sys.path.append(path)
domain = 'seura'
url = 'http://seura.fi/puheenaihe/ajankohtaista/vasemmisto-kehuu-kokoomusta-harjoittavat-rehellisesti-politiikkaa-joka-on-ajanut-suomen-lamaan/?shared=43026-ad87bd06-500'
out = 'test/parser_out.txt'
module = importlib.import_module( 'sites.' + domain )
d = module.parse(url)
class TestParser:
@classmethod
def setup_class(cls):
common.initialise_file( out, d )
def test_file_exists(self):
common.file_exists(out)
def test_file_not_empty(self):
common.file_not_empty(out)
def test_file_contents_match(self):
common.file_contents_match(domain, out)
def test_dictionary_created(self):
common.dictionary_created(d)
def test_dictionary_contains_right_keys(self):
common.dictionary_contains_right_keys(d)
def test_dictionary_values_correct_type(self):
common.dictionary_values_correct_type(d)
| mit |
danielkza/dnf | tests/test_main.py | 17 | 1729 | # Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
"""Tests of the CLI entry point."""
from __future__ import unicode_literals
import dnf.cli.main
import dnf.logging
import dnf.pycomp
import tests.support
class MainTest(tests.support.TestCase):
"""Tests the ``dnf.cli.main`` module."""
def test_ex_IOError_logs_traceback(self):
"""Test whether the traceback is logged if an error is raised."""
lvl = dnf.logging.SUBDEBUG
out = dnf.pycomp.StringIO()
with tests.support.wiretap_logs('dnf', lvl, out):
try:
raise OSError('test_ex_IOError_logs_traceback')
except OSError as e:
dnf.cli.main.ex_IOError(e)
self.assertTracebackIn('OSError: test_ex_IOError_logs_traceback\n',
out.getvalue())
| gpl-2.0 |
robcarver17/pysystemtrade | sysdata/production/historic_orders.py | 1 | 4692 |
"""
Historic orders
Orders which are still being executed live in the order stack type; see sysexecution
Note orderID here are different from ones used in order stack (which are temporary)
We store three types of orders: strategy level, contract level and broker level
Use to analyse execution and also construct strategy/contract level p&l
Doesn't have to reconcile with positions!
"""
import datetime
from syscore.objects import arg_not_supplied, missing_order
from sysdata.base_data import baseData
from sysobjects.fills import listOfFills, fill_from_order
from sysexecution.orders.base_orders import Order
from sysexecution.orders.broker_orders import single_fill_from_broker_order
from sysexecution.order_stacks.order_stack import missingOrder
from sysexecution.orders.list_of_orders import listOfOrders
from sysobjects.production.tradeable_object import instrumentStrategy, futuresContract
from syslogdiag.log_to_screen import logtoscreen
class genericOrdersData(baseData):
def __init__(self, log=logtoscreen("")):
super().__init__(log=log)
def __repr__(self):
return "genericOrdersData object"
def delete_order_with_orderid(self, order_id: int):
order = self.get_order_with_orderid(order_id)
if order is missing_order:
raise missingOrder
self._delete_order_with_orderid_without_checking(order_id)
def add_order_to_data(self, order: Order, ignore_duplication=False):
raise NotImplementedError
def get_list_of_order_ids(self) -> list:
raise NotImplementedError
def get_order_with_orderid(self, order_id: int):
# return missing_order if not found
raise NotImplementedError
def _delete_order_with_orderid_without_checking(self, order_id: int):
raise NotImplementedError
def update_order_with_orderid(self, order_id: int, order: Order):
raise NotImplementedError
def get_list_of_order_ids_in_date_range(
self,
period_start: datetime.datetime,
period_end: datetime.datetime=arg_not_supplied) -> list:
raise NotImplementedError
class strategyHistoricOrdersData(genericOrdersData):
def get_fills_history_for_instrument_strategy(
self, instrument_strategy: instrumentStrategy
) -> listOfFills:
"""
        :param instrument_strategy: instrumentStrategy
        :return: listOfFills object, with fill and price
"""
order_list = self.get_list_of_orders_for_instrument_strategy(
instrument_strategy
)
order_list_as_fills = [fill_from_order(order) for order in order_list]
list_of_fills = listOfFills(order_list_as_fills)
return list_of_fills
def get_list_of_orders_for_instrument_strategy(self, instrument_strategy: instrumentStrategy) -> listOfOrders:
list_of_ids = self.get_list_of_order_ids_for_instrument_strategy(instrument_strategy)
order_list = []
for order_id in list_of_ids:
order = self.get_order_with_orderid(order_id)
order_list.append(order)
order_list = listOfOrders(order_list)
return order_list
def get_list_of_order_ids_for_instrument_strategy(self, instrument_strategy: instrumentStrategy):
raise NotImplementedError
class contractHistoricOrdersData(genericOrdersData):
pass
class brokerHistoricOrdersData(contractHistoricOrdersData):
def get_fills_history_for_contract(
self, futures_contract: futuresContract
) -> listOfFills:
"""
        :param futures_contract: futuresContract
        :return: listOfFills object, with fill and price
"""
instrument_code = futures_contract.instrument_code
contract_str = futures_contract.date_str
list_of_order_ids = self.get_list_of_order_ids_for_instrument_and_contract_str(
instrument_code=instrument_code, contract_str=contract_str
)
list_of_fills = [self.get_fill_from_order_id(orderid, contract_str) for orderid in list_of_order_ids]
list_of_fills = [fill for fill in list_of_fills if fill is not missing_order]
list_of_fills = listOfFills(list_of_fills)
return list_of_fills
def get_fill_from_order_id(self, orderid, contract_str: str):
order = self.get_order_with_orderid(orderid)
fill = single_fill_from_broker_order(order, contract_str)
return fill
def get_list_of_order_ids_for_instrument_and_contract_str(self, instrument_code: str,
contract_str: str) -> list:
raise NotImplementedError
| gpl-3.0 |
naterh/cloud-init-rax-pkg | cloudinit/url_helper.py | 3 | 12910 | # vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <[email protected]>
# Author: Juerg Haefliger <[email protected]>
# Author: Joshua Harlow <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import six
import requests
from requests import exceptions
from six.moves.urllib.parse import (
urlparse, urlunparse,
quote as urlquote)
from cloudinit import log as logging
from cloudinit import version
LOG = logging.getLogger(__name__)
if six.PY2:
import httplib
NOT_FOUND = httplib.NOT_FOUND
else:
import http.client
NOT_FOUND = http.client.NOT_FOUND
# Check if requests has ssl support (added in requests >= 0.8.8)
SSL_ENABLED = False
CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0)
_REQ_VER = None
try:
from distutils.version import LooseVersion
import pkg_resources
_REQ = pkg_resources.get_distribution('requests')
_REQ_VER = LooseVersion(_REQ.version)
if _REQ_VER >= LooseVersion('0.8.8'):
SSL_ENABLED = True
if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'):
CONFIG_ENABLED = True
except:
pass
def _cleanurl(url):
parsed_url = list(urlparse(url, scheme='http'))
if not parsed_url[1] and parsed_url[2]:
# Swap these since this seems to be a common
# occurrence when given urls like 'www.google.com'
parsed_url[1] = parsed_url[2]
parsed_url[2] = ''
return urlunparse(parsed_url)
def combine_url(base, *add_ons):
def combine_single(url, add_on):
url_parsed = list(urlparse(url))
path = url_parsed[2]
if path and not path.endswith("/"):
path += "/"
path += urlquote(str(add_on), safe="/:")
url_parsed[2] = path
return urlunparse(url_parsed)
url = base
for add_on in add_ons:
url = combine_single(url, add_on)
return url
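# Illustrative behaviour of combine_url (the URL below is made up):
#   combine_url('http://169.254.169.254', '2009-04-04', 'meta-data')
#   returns 'http://169.254.169.254/2009-04-04/meta-data'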
# Made to have same accessors as UrlResponse so that the
# read_file_or_url can return this or that object and the
# 'user' of those objects will not need to know the difference.
class StringResponse(object):
def __init__(self, contents, code=200):
self.code = code
self.headers = {}
self.contents = contents
self.url = None
def ok(self, *args, **kwargs):
if self.code != 200:
return False
return True
def __str__(self):
return self.contents
class FileResponse(StringResponse):
def __init__(self, path, contents, code=200):
StringResponse.__init__(self, contents, code=code)
self.url = path
class UrlResponse(object):
def __init__(self, response):
self._response = response
@property
def contents(self):
return self._response.content
@property
def url(self):
return self._response.url
def ok(self, redirects_ok=False):
upper = 300
if redirects_ok:
upper = 400
if self.code >= 200 and self.code < upper:
return True
else:
return False
@property
def headers(self):
return self._response.headers
@property
def code(self):
return self._response.status_code
def __str__(self):
return self._response.text
class UrlError(IOError):
def __init__(self, cause, code=None, headers=None):
IOError.__init__(self, str(cause))
self.cause = cause
self.code = code
self.headers = headers
if self.headers is None:
self.headers = {}
def _get_ssl_args(url, ssl_details):
ssl_args = {}
scheme = urlparse(url).scheme
if scheme == 'https' and ssl_details:
if not SSL_ENABLED:
LOG.warn("SSL is not supported in requests v%s, "
"cert. verification can not occur!", _REQ_VER)
else:
if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
ssl_args['verify'] = ssl_details['ca_certs']
else:
ssl_args['verify'] = True
if 'cert_file' in ssl_details and 'key_file' in ssl_details:
ssl_args['cert'] = [ssl_details['cert_file'],
ssl_details['key_file']]
elif 'cert_file' in ssl_details:
ssl_args['cert'] = str(ssl_details['cert_file'])
return ssl_args
def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
headers=None, headers_cb=None, ssl_details=None,
check_status=True, allow_redirects=True, exception_cb=None):
url = _cleanurl(url)
req_args = {
'url': url,
}
req_args.update(_get_ssl_args(url, ssl_details))
req_args['allow_redirects'] = allow_redirects
req_args['method'] = 'GET'
if timeout is not None:
req_args['timeout'] = max(float(timeout), 0)
if data:
req_args['method'] = 'POST'
# It doesn't seem like config
# was added in older library versions (or newer ones either), thus we
# need to manually do the retries if it wasn't...
if CONFIG_ENABLED:
req_config = {
'store_cookies': False,
}
# Don't use the retry support built-in
# since it doesn't allow for 'sleep_times'
# in between tries....
# if retries:
# req_config['max_retries'] = max(int(retries), 0)
req_args['config'] = req_config
manual_tries = 1
if retries:
manual_tries = max(int(retries) + 1, 1)
if not headers:
headers = {
'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
}
if not headers_cb:
def _cb(url):
return headers
headers_cb = _cb
if data:
req_args['data'] = data
if sec_between is None:
sec_between = -1
excps = []
# Handle retrying ourselves since the built-in support
# doesn't handle sleeping between tries...
for i in range(0, manual_tries):
req_args['headers'] = headers_cb(url)
filtered_req_args = {}
for (k, v) in req_args.items():
if k == 'data':
continue
filtered_req_args[k] = v
try:
LOG.debug("[%s/%s] open '%s' with %s configuration", i,
manual_tries, url, filtered_req_args)
r = requests.request(**req_args)
if check_status:
r.raise_for_status()
LOG.debug("Read from %s (%s, %sb) after %s attempts", url,
r.status_code, len(r.content), (i + 1))
# Doesn't seem like we can make it use a different
# subclass for responses, so add our own backward-compat
# attrs
return UrlResponse(r)
except exceptions.RequestException as e:
if (isinstance(e, (exceptions.HTTPError))
and hasattr(e, 'response') # This appeared in v 0.10.8
and hasattr(e.response, 'status_code')):
excps.append(UrlError(e, code=e.response.status_code,
headers=e.response.headers))
else:
excps.append(UrlError(e))
if SSL_ENABLED and isinstance(e, exceptions.SSLError):
# ssl exceptions are not going to get fixed by waiting a
# few seconds
break
if exception_cb and not exception_cb(req_args.copy(), excps[-1]):
break
if i + 1 < manual_tries and sec_between > 0:
LOG.debug("Please wait %s seconds while we wait to try again",
sec_between)
time.sleep(sec_between)
if excps:
raise excps[-1]
return None # Should throw before this...
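# Example use of readurl (illustrative URL and values): fetch with a 5 second
# timeout, retrying twice with one second between attempts. On success a
# UrlResponse is returned; once the retries are exhausted the last UrlError is
# raised.
#   resp = readurl('http://169.254.169.254/latest/meta-data/', timeout=5,
#                  retries=2, sec_between=1)
#   contents = resp.contents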
def wait_for_url(urls, max_wait=None, timeout=None,
status_cb=None, headers_cb=None, sleep_time=1,
exception_cb=None):
"""
urls: a list of urls to try
max_wait: roughly the maximum time to wait before giving up
The max time is *actually* len(urls)*timeout as each url will
be tried once and given the timeout provided.
a number <= 0 will always result in only one try
timeout: the timeout provided to urlopen
status_cb: call method with string message when a url is not available
headers_cb: call method with single argument of url to get headers
for request.
exception_cb: call method with 2 arguments 'msg' (per status_cb) and
'exception', the exception that occurred.
    the idea of this routine is to wait for the EC2 metadata service to
come up. On both Eucalyptus and EC2 we have seen the case where
the instance hit the MD before the MD service was up. EC2 seems
    to have permanently fixed this, though.
In openstack, the metadata service might be painfully slow, and
unable to avoid hitting a timeout of even up to 10 seconds or more
(LP: #894279) for a simple GET.
Offset those needs with the need to not hang forever (and block boot)
on a system where cloud-init is configured to look for EC2 Metadata
service but is not going to find one. It is possible that the instance
    data host (169.254.169.254) may be firewalled off entirely for a system,
meaning that the connection will block forever unless a timeout is set.
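    A rough usage sketch (URL and values are illustrative):
        url = wait_for_url(['http://169.254.169.254/latest/meta-data/'],
                           max_wait=120, timeout=10)
        if url:
            resp = readurl(url)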
"""
start_time = time.time()
def log_status_cb(msg, exc=None):
LOG.debug(msg)
if status_cb is None:
status_cb = log_status_cb
def timeup(max_wait, start_time):
return ((max_wait <= 0 or max_wait is None) or
(time.time() - start_time > max_wait))
loop_n = 0
while True:
sleep_time = int(loop_n / 5) + 1
for url in urls:
now = time.time()
if loop_n != 0:
if timeup(max_wait, start_time):
break
if timeout and (now + timeout > (start_time + max_wait)):
# shorten timeout to not run way over max_time
timeout = int((start_time + max_wait) - now)
reason = ""
url_exc = None
try:
if headers_cb is not None:
headers = headers_cb(url)
else:
headers = {}
response = readurl(url, headers=headers, timeout=timeout,
check_status=False)
if not response.contents:
reason = "empty response [%s]" % (response.code)
url_exc = UrlError(ValueError(reason), code=response.code,
headers=response.headers)
elif not response.ok():
reason = "bad status code [%s]" % (response.code)
url_exc = UrlError(ValueError(reason), code=response.code,
headers=response.headers)
else:
return url
except UrlError as e:
reason = "request error [%s]" % e
url_exc = e
except Exception as e:
reason = "unexpected error [%s]" % e
url_exc = e
time_taken = int(time.time() - start_time)
status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url,
time_taken,
max_wait,
reason)
status_cb(status_msg)
if exception_cb:
# This can be used to alter the headers that will be sent
# in the future, for example this is what the MAAS datasource
# does.
exception_cb(msg=status_msg, exception=url_exc)
if timeup(max_wait, start_time):
break
loop_n = loop_n + 1
LOG.debug("Please wait %s seconds while we wait to try again",
sleep_time)
time.sleep(sleep_time)
return False
| gpl-3.0 |
zentol/flink | flink-python/pyflink/table/__init__.py | 4 | 4723 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
Important classes of Flink Table API:
- :class:`pyflink.table.TableEnvironment`
Main entry point for :class:`Table` and SQL functionality
- :class:`pyflink.table.Table`
The core component of the Table API.
Use the methods of :class:`Table` to transform data.
- :class:`pyflink.table.TableConfig`
A config to define the runtime behavior of the Table API.
It is necessary when creating :class:`TableEnvironment`.
- :class:`pyflink.table.EnvironmentSettings`
Defines all parameters that initialize a table environment.
- :class:`pyflink.table.TableSource`
Defines an external data source as a table.
- :class:`pyflink.table.TableSink`
Specifies how to emit a table to an external system or location.
- :class:`pyflink.table.DataTypes`
Defines a list of data types available.
- :class:`pyflink.table.Row`
A row in a :class:`Table`.
- :class:`pyflink.table.window`
Helper classes for working with :class:`pyflink.table.window.GroupWindow`
(:class:`pyflink.table.window.Tumble`, :class:`pyflink.table.window.Session`,
:class:`pyflink.table.window.Slide`) and :class:`pyflink.table.window.OverWindow` window
(:class:`pyflink.table.window.Over`).
- :class:`pyflink.table.descriptors`
Helper classes that describes DDL information, such as how to connect to another system,
the format of data, the schema of table, the event time attribute in the schema, etc.
- :class:`pyflink.table.catalog`
Responsible for reading and writing metadata such as database/table/views/UDFs
from a registered :class:`pyflink.table.catalog.Catalog`.
- :class:`pyflink.table.TableSchema`
Represents a table's structure with field names and data types.
- :class:`pyflink.table.FunctionContext`
Used to obtain global runtime information about the context in which the
user-defined function is executed, such as the metric group, and global job parameters, etc.
- :class:`pyflink.table.ScalarFunction`
Base interface for user-defined scalar function.
- :class:`pyflink.table.StatementSet`
Base interface accepts DML statements or Tables.
"""
from __future__ import absolute_import
from pyflink.table.environment_settings import EnvironmentSettings
from pyflink.table.explain_detail import ExplainDetail
from pyflink.table.result_kind import ResultKind
from pyflink.table.sinks import CsvTableSink, TableSink, WriteMode
from pyflink.table.sources import CsvTableSource, TableSource
from pyflink.table.sql_dialect import SqlDialect
from pyflink.table.statement_set import StatementSet
from pyflink.table.table import GroupWindowedTable, GroupedTable, OverWindowedTable, Table, \
WindowGroupedTable
from pyflink.table.table_config import TableConfig
from pyflink.table.table_environment import (TableEnvironment, StreamTableEnvironment,
BatchTableEnvironment)
from pyflink.table.table_result import TableResult
from pyflink.table.table_schema import TableSchema
from pyflink.table.types import DataTypes, UserDefinedType, Row
from pyflink.table.udf import FunctionContext, ScalarFunction
__all__ = [
'BatchTableEnvironment',
'CsvTableSink',
'CsvTableSource',
'DataTypes',
'EnvironmentSettings',
'ExplainDetail',
'FunctionContext',
'GroupWindowedTable',
'GroupedTable',
'OverWindowedTable',
'ResultKind',
'Row',
'ScalarFunction',
'SqlDialect',
'StatementSet',
'StreamTableEnvironment',
'Table',
'TableConfig',
'TableEnvironment',
'TableResult',
'TableSchema',
'TableSink',
'TableSource',
'UserDefinedType',
'WindowGroupedTable',
'WriteMode'
]
| apache-2.0 |
ClearCorp-dev/management-system | mgmtsystem/__openerp__.py | 1 | 1845 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Management System",
"version": "1.0",
"author": "Savoir-faire Linux,Odoo Community Association (OCA)",
"website": "http://www.savoirfairelinux.com",
"license": "AGPL-3",
"category": "Management System",
"complexity": "normal",
"description": """\
This module is the basis of any management system applications:
* audit reports,
* nonconformities,
* immediate actions,
* preventive actions,
* corrective actions,
* improvement opportunities.
""",
"depends": [
'base',
'board',
'document_page',
],
"data": [
'security/mgmtsystem_security.xml',
'security/ir.model.access.csv',
'menus.xml',
'mgmtsystem_system.xml',
'board_mgmtsystem_view.xml',
],
"demo": [],
'installable': True,
}
| agpl-3.0 |
0x776b7364/kali-scripts | page-enum.py | 2 | 1620 | #!/usr/bin/env python
import requests
from random import randint
from time import sleep
from bs4 import BeautifulSoup
from PIL import Image
from StringIO import StringIO
import mimetypes
import subprocess
USER_AGENT = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:56.0) Gecko/20100101 Firefox/56.0"
INITIAL_URL = "https://www.my-site.com"
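# NOTE: INITIAL_URL above and the "keyword" page prefix used in the loop below
# are placeholders; point them at the real site and page naming pattern.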
add_header = {
'User-Agent': USER_AGENT,
'DNT': '1',
'Upgrade-Insecure-Requests': '1'
}
print("[I] Sending initial request ...")
initial_req = requests.head(INITIAL_URL, headers=add_header, allow_redirects=True)
redir_url = initial_req.url
print("[I] Obtained redirected URL: %s" % (redir_url))
for i in range(1,5):
sleep(randint(2,5))
curr = "%03d" % (i,)
req = requests.get(redir_url + "keyword" + curr, headers=add_header)
resp_parsed = BeautifulSoup(req.text, 'html.parser')
image_element = resp_parsed.find("meta", property="og:image")
if image_element:
image_url = image_element["content"]
print("[I] %s: Found. Saving image %s ..." % (curr, image_url))
try:
image_req = requests.get(image_url)
content_type = image_req.headers['content-type']
extension = mimetypes.guess_extension(content_type)
image_obj = Image.open(StringIO(image_req.content))
image_obj.save("keyword" + curr + extension)
except:
print("[E] %s: Invalid image URL %s. Saving placeholder file ..." % (curr, image_url))
bashCommand = "touch keyword" + curr + ".error.img"
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
else:
print("[E] %s: Not Found" % (curr))
print("[I] Script completed.")
| mit |
mstrader/MkidDigitalReadout | DataReadout/ChannelizerControls/Roach2Controls.py | 1 | 90924 | """
Author: Alex Walter
Date: April 25, 2016
Firmware: darkS2*.fpg
This class is for setting and reading LUTs, registers, and other memory components in the ROACH2 Virtex 6 FPGA using casperfpga tools.
It's also the IO for the ADC/DAC board's Virtex 7 FPGA through the ROACH2
NOTE: All frequencies are considered positive. A negative frequency can be asserted by the aliased signal of a large positive frequency (by adding the sample rate).
This makes things easier for coding since I can check valid frequencies > 0 and also for calculating which fftBin a frequency resides in (see generateFftChanSelection()).
Example usage:
# Collect MKID info
nFreqs=1024
loFreq = 5.e9
spacing = 2.e6
freqList = np.arange(loFreq-nFreqs/2.*spacing,loFreq+nFreqs/2.*spacing,spacing)
freqList+=np.random.uniform(-spacing,spacing,nFreqs)
freqList = np.sort(freqList)
attenList = np.random.randint(23,33,nFreqs)
# Talk to Roach
    roach_0 = Roach2Controls(ip, params, True, True)
roach_0.setLOFreq(loFreq)
roach_0.generateResonatorChannels(freqList)
roach_0.generateFftChanSelection()
roach_0.generateDacComb(freqList=None, resAttenList=attenList, globalDacAtten=17)
roach_0.generateDdsTones()
roach_0.loadChanSelection()
roach_0.loadDacLUT()
List of Functions:
__init__ - Connects to Roach2 FPGA, sets the delay between the dds lut and the end of the fft block
connect - Connect to V6 FPGA on Roach2
initializeV7UART - Initializes the UART connection to the Virtex 7
loadDdsShift - Set the delay between the dds lut and the end of the fft block
generateResonatorChannels - Figures out which stream:channel to assign to each resonator frequency
generateFftChanSelection - Assigns fftBins to each steam:channel
loadSingleChanSelection - Loads a channel for each stream into the channel selector LUT
loadChanSelection - Loops through loadSingleChanSelection()
setLOFreq - Defines LO frequency as an attribute, self.LOFreq
loadLOFreq - Loads the LO frequency to the IF board
generateTones - Returns a list of I,Q time series for each frequency provided
generateDacComb - Returns a single I,Q time series representing the DAC freq comb
    loadDacLUT - Loads the freq comb from generateDacComb() into the LUT
generateDdsTones - Defines interweaved tones for dds
loadDdsLUT - Loads dds tones into Roach2 memory
List of useful class attributes:
ip - ip address of roach2
params - Dictionary of parameters
freqList - List of resonator frequencies
attenList - List of resonator attenuations
freqChannels - 2D array of frequencies. Each column is the a stream and each row is a channel.
If uneven number of frequencies this array is padded with -1's
fftBinIndChannels - 2D array of fftBin indices corresponding to the frequencies/streams/channels in freqChannels. freq=-1 maps to fftBin=0.
dacPhaseList - List of the most recent relative phases used for generating DAC frequency comb
dacScaleFactor - Scale factor for frequency comb to scale the sum of tones onto the DAC's dynamic range.
Careful, this is 1/scaleFactor we defined for ARCONS templar
dacQuantizedFreqList - List of frequencies used to define DAC frequency comb. Quantized to DAC digital limits
dacFreqComb - Complex time series signal used for DAC frequency comb.
LOFreq - LO frequency of IF board
ddsQuantizedFreqList - 2D array of frequencies shaped like freqChannels. Quantized to dds digital limits
ddsPhaseList - 2D array of frequencies shaped like freqChannels. Used to rotate loops.
TODO:
modify takePhaseSnapshot() to work for nStreams: need register names
In performIQSweep(), is it skipping every other LO freq???
Changed delays in performIQSweep(), and takeAvgIQData() from 0.1 to 0.01 seconds
BUGS:
The frequencies in freqList are assumed to be unique.
If they aren't, then there are problems determining which frequency corresponds to which ch/stream.
This should be fixed with some indexing tricks which don't rely on np.where
"""
import sys,os,time,datetime,struct,math
import warnings, inspect
import matplotlib.pyplot as plt
import numpy as np
import scipy.special
import casperfpga
import socket
import binascii
from Utils.binTools import castBin # part of SDR
from readDict import readDict #Part of the ARCONS-pipeline/util
from initialBeammap import xyPack,xyUnpack
class Roach2Controls:
def __init__(self, ip, paramFile, verbose=False, debug=False):
'''
Input:
ip - ip address string of ROACH2
paramFile - param object or directory string to dictionary containing important info
verbose - show print statements
debug - Save some things to disk for debugging
'''
np.random.seed(1) #Make the random phase values always the same
self.verbose=verbose
self.debug=debug
self.ip = ip
try:
self.params = readDict()
self.params.readFromFile(paramFile)
except TypeError:
self.params = paramFile
if debug and not os.path.exists(self.params['debugDir']):
os.makedirs(self.params['debugDir'])
#Some more parameters
self.freqPadValue = -1 # pad frequency lists so that we have a multiple of number of streams
self.fftBinPadValue = 0 # pad fftBin selection with fftBin 0
self.ddsFreqPadValue = -1 #
self.v7_ready = 0
self.lut_dump_buffer_size = self.params['lut_dump_buffer_size']
def connect(self):
self.fpga = casperfpga.katcp_fpga.KatcpFpga(self.ip,timeout=3.)
time.sleep(.1)
self.fpga._timeout = 50.
if not self.fpga.is_running():
print 'Firmware is not running. Start firmware, calibrate, and load wave into qdr first!'
else:
self.fpga.get_system_information()
print self.fpga.snapshots
def checkDdsShift(self):
'''
This function checks the delay between the dds channels and the fft.
It returns the difference.
Call loadDdsShift with the difference
OUTPUTS:
ddsShift - # clock cycles delay between dds and fft channels
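        Usage sketch (illustrative):
            roach_0.loadDdsShift(roach_0.checkDdsShift())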
'''
self.fpga.write_int(self.params['start_reg'],1) # Make sure fft is running
self.fpga.write_int(self.params['read_dds_reg'],1) # Make sure dds is running
ddsShift_initial = self.fpga.read_int(self.params['ddsShift_reg'])
#self.fpga.write_int(self.params['ddsShift_reg'],0)
self.fpga.write_int(self.params['checkLag_reg'],0) # make sure this starts as off
self.fpga.write_int(self.params['checkLag_reg'],1) # Tell firmware to grab dds ch and fft ch at same exact time. Stores them in ladDds_reg and lagData_reg
self.fpga.write_int(self.params['checkLag_reg'],0) # turn this off for next time
data_ch = self.fpga.read_int(self.params['lagData_reg'])
dds_ch = self.fpga.read_int(self.params['lagDds_reg'])
#self.fpga.write_int(self.params['ddsShift_reg'],ddsShift_initial) # load what we had in there before
ddsShift = (ddsShift_initial + dds_ch - data_ch +1) % self.params['nChannelsPerStream'] # have to add 1 here if we use the np.roll in the writeQDR() function
#ddsShift = (ddsShift_initial + dds_ch - data_ch ) % self.params['nChannelsPerStream']
if self.verbose:
print 'current dds lag', ddsShift_initial
print 'dds ch',dds_ch
print 'fft ch',data_ch
return ddsShift
def loadDdsShift(self,ddsShift=76):
'''
Set the delay between the dds lut and the end of the fft block (firmware dependent)
INPUTS:
ddsShift - # clock cycles
'''
self.fpga.write_int(self.params['ddsShift_reg'],ddsShift)
if self.verbose:
print 'dds lag: ',ddsShift
return ddsShift
def initializeV7UART(self, waitForV7Ready = True, baud_rate = None, lut_dump_buffer_size = None):
'''
Initializes the UART connection to the Virtex 7. Puts the V7 in Recieve mode, sets the
baud rate
Defines global variables:
self.baud_rate - baud rate of UART connection
self.v7_ready - 1 when v7 is ready for a command
self.lut_dump_data_period - number of clock cycles between writes to the UART
self.lut_dump_buffer_size - size, in bytes, of each BRAM dump
'''
if(baud_rate == None):
self.baud_rate = self.params['baud_rate']
else:
self.baud_rate = baud_rate
if(lut_dump_buffer_size == None):
self.lut_dump_buffer_size = self.params['lut_dump_buffer_size']
else:
self.lut_dump_buffer_size = lut_dump_buffer_size
self.lut_dump_data_period = (10*self.params['fpgaClockRate'])//self.baud_rate + 1 #10 bits per data byte
self.v7_ready = 0
self.fpga.write_int(self.params['enBRAMDump_reg'], 0)
self.fpga.write_int(self.params['txEnUART_reg'],0)
self.fpga.write_int('a2g_ctrl_lut_dump_data_period', self.lut_dump_data_period)
self.fpga.write_int(self.params['resetUART_reg'],1)
time.sleep(1)
self.fpga.write_int(self.params['resetUART_reg'],0)
if waitForV7Ready:
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
self.v7_ready = 0
self.fpga.write_int(self.params['inByteUART_reg'],1) # Acknowledge that ROACH2 knows MB is ready for commands
time.sleep(0.01)
self.fpga.write_int(self.params['txEnUART_reg'],1)
time.sleep(0.01)
self.fpga.write_int(self.params['txEnUART_reg'],0)
def initV7MB(self):
"""
Send commands over UART to initialize V7.
Call initializeV7UART first
"""
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
time.sleep(.2)
self.v7_ready = 0
self.sendUARTCommand(self.params['mbEnableDACs'])
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
time.sleep(.2)
self.v7_ready = 0
self.sendUARTCommand(self.params['mbSendLUTToDAC'])
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
time.sleep(.2)
self.v7_ready = 0
self.sendUARTCommand(self.params['mbInitLO'])
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
time.sleep(.2)
self.v7_ready = 0
self.sendUARTCommand(self.params['mbInitAtten'])
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
time.sleep(.2)
self.v7_ready = 0
self.sendUARTCommand(self.params['mbEnFracLO'])
def generateDdsTones(self, freqChannels=None, fftBinIndChannels=None, phaseList=None):
"""
Create and interweave dds frequencies
Call setLOFreq(), generateResonatorChannels(), generateFftChanSelection() first.
Sets self.ddsPhaseList
INPUT:
            freqChannels - Each column contains the resonator frequencies in a single stream. The row index is the channel number. It's padded with -1's.
Made by generateResonatorChannels(). If None, use self.freqChannels
fftBinIndChannels - Same shape as freqChannels but contains the fft bin index. Made by generateFftChanSelection(). If None, use self.fftBinIndChannels
phaseList - Same shape as freqChannels. Contains phase offsets (0 to 2Pi) for dds sampling.
If None, set to self.ddsPhaseList. if self.ddsPhaseList doesn't exist then set to all zeros
OUTPUT:
dictionary with following keywords
'iStreamList' - 2D array. Each row is an interweaved list of i values for a single stream.
'qStreamList' - q values
'quantizedFreqList' - 2d array of dds frequencies. (same shape as freqChannels) Padded with self.ddsFreqPadValue
'phaseList' - 2d array of phases for each frequency (same shape as freqChannels) Padded with 0's
"""
#Interpret Inputs
if freqChannels is None:
freqChannels = self.freqChannels
if len(np.ravel(freqChannels))>self.params['nChannels']:
raise ValueError("Too many freqs provided. Can only accommodate "+str(self.params['nChannels'])+" resonators")
self.freqChannels = freqChannels
if fftBinIndChannels is None:
fftBinIndChannels = self.fftBinIndChannels
if len(np.ravel(fftBinIndChannels))>self.params['nChannels']:
raise ValueError("Too many freqs provided. Can only accommodate "+str(self.params['nChannels'])+" resonators")
self.fftBinIndChannels = fftBinIndChannels
if phaseList is None:
if hasattr(self,'ddsPhaseList'): phaseList = self.ddsPhaseList
else: phaseList = np.zeros(np.asarray(freqChannels).shape)
if np.asarray(phaseList).shape != np.asarray(freqChannels).shape:
phaseList = np.zeros(np.asarray(freqChannels).shape)
if not hasattr(self,'LOFreq'):
raise ValueError("Need to set LO freq by calling setLOFreq()")
if self.verbose:
print "Generating Dds Tones..."
# quantize resonator tones to dds resolution
# first figure out the actual frequencies being made by the DAC
dacFreqList = freqChannels-self.LOFreq
dacFreqList[np.where(dacFreqList<0.)] += self.params['dacSampleRate'] #For +/- freq
dacFreqResolution = self.params['dacSampleRate']/(self.params['nDacSamplesPerCycle']*self.params['nLutRowsToUse'])
dacQuantizedFreqList = np.round(dacFreqList/dacFreqResolution)*dacFreqResolution
# Figure out how the dac tones end up relative to their FFT bin centers
fftBinSpacing = self.params['dacSampleRate']/self.params['nFftBins']
fftBinCenterFreqList = fftBinIndChannels*fftBinSpacing
ddsFreqList = dacQuantizedFreqList - fftBinCenterFreqList
# Quantize to DDS sample rate and make sure all freqs are positive by adding sample rate for aliasing
ddsSampleRate = self.params['nDdsSamplesPerCycle'] * self.params['fpgaClockRate'] / self.params['nCyclesToLoopToSameChannel']
ddsFreqList[np.where(ddsFreqList<0)]+=ddsSampleRate # large positive frequencies are aliased back to negative freqs
nDdsSamples = self.params['nDdsSamplesPerCycle']*self.params['nQdrRows']/self.params['nCyclesToLoopToSameChannel']
ddsFreqResolution = 1.*ddsSampleRate/nDdsSamples
ddsQuantizedFreqList = np.round(ddsFreqList/ddsFreqResolution)*ddsFreqResolution
ddsQuantizedFreqList[np.where(freqChannels<0)] = self.ddsFreqPadValue # Pad excess frequencies with -1
self.ddsQuantizedFreqList = ddsQuantizedFreqList
# For each Stream, generate tones and interweave time streams for the dds time multiplexed multiplier
nStreams = int(self.params['nChannels']/self.params['nChannelsPerStream']) #number of processing streams. For Gen 2 readout this should be 4
iStreamList = []
qStreamList = []
for i in range(nStreams):
# generate individual tone time streams
toneParams={
'freqList': ddsQuantizedFreqList[:,i][np.where(dacQuantizedFreqList[:,i]>0)],
'nSamples': nDdsSamples,
'sampleRate': ddsSampleRate,
'amplitudeList': None, #defaults to 1
'phaseList': phaseList[:,i][np.where(dacQuantizedFreqList[:,i]>0)]}
toneDict = self.generateTones(**toneParams)
#scale amplitude to number of bits in memory and round
nBitsPerSampleComponent = self.params['nBitsPerDdsSamplePair']/2
maxValue = int(np.round(2**(nBitsPerSampleComponent - 1)-1)) # 1 bit for sign
iValList = np.array(np.round(toneDict['I']*maxValue),dtype=np.int)
qValList = np.array(np.round(toneDict['Q']*maxValue),dtype=np.int)
#print 'iVals: '+str(iValList)
#print 'qVals: '+str(qValList)
#print np.asarray(iValList).shape
#interweave the values such that we have two samples from freq 0 (row 0), two samples from freq 1, ... to freq 256. Then have the next two samples from freq 1 ...
freqPad = np.zeros((self.params['nChannelsPerStream'] - len(toneDict['quantizedFreqList']),nDdsSamples),dtype=np.int)
#First pad with missing resonators
if len(iValList) >0:
iValList = np.append(iValList,freqPad,0)
qValList = np.append(qValList,freqPad,0)
else: #if no resonators in stream then everything is 0's
iValList = freqPad
qValList = freqPad
iValList = np.reshape(iValList,(self.params['nChannelsPerStream'],-1,self.params['nDdsSamplesPerCycle']))
qValList = np.reshape(qValList,(self.params['nChannelsPerStream'],-1,self.params['nDdsSamplesPerCycle']))
iValList = np.swapaxes(iValList,0,1)
qValList = np.swapaxes(qValList,0,1)
iValues = iValList.flatten('C')
qValues = qValList.flatten('C')
# put into list
iStreamList.append(iValues)
qStreamList.append(qValues)
phaseList[:len(toneDict['phaseList']),i] = toneDict['phaseList'] # We need this if we let self.generateTones() choose random phases
self.ddsPhaseList = phaseList
self.ddsIStreamsList = iStreamList
self.ddsQStreamsList = qStreamList
if self.verbose:
print '\tDDS freqs: '+str(self.ddsQuantizedFreqList)
for i in range(nStreams):
print '\tStream '+str(i)+' I vals: '+str(self.ddsIStreamsList[i])
print '\tStream '+str(i)+' Q vals: '+str(self.ddsQStreamsList[i])
print '...Done!'
return {'iStreamList':iStreamList, 'qStreamList':qStreamList, 'quantizedFreqList':ddsQuantizedFreqList, 'phaseList':phaseList}
def loadDdsLUT(self, ddsToneDict=None):
'''
Load dds tones to LUT in Roach2 memory
INPUTS:
ddsToneDict - from generateDdsTones()
dictionary with following keywords
'iStreamList' - 2D array. Each row is an interweaved list of i values for a single stream. Columns are different streams.
'qStreamList' - q values
OUTPUTS:
allMemVals - memory values written to QDR
'''
if ddsToneDict is None:
try:
ddsToneDict={'iStreamList':self.ddsIStreamsList,'qStreamList':self.ddsQStreamsList}
except AttributeError:
print "Need to run generateDdsTones() first!"
raise
if self.verbose:
print "Loading DDS LUT..."
self.fpga.write_int(self.params['read_dds_reg'],0) #do not read from qdr while writing
memNames = self.params['ddsMemName_regs']
allMemVals=[]
for iMem in range(len(memNames)):
iVals,qVals = ddsToneDict['iStreamList'][iMem],ddsToneDict['qStreamList'][iMem]
formatWaveparams={'iVals':iVals,
'qVals':qVals,
'nBitsPerSamplePair':self.params['nBitsPerDdsSamplePair'],
'nSamplesPerCycle':self.params['nDdsSamplesPerCycle'],
'nMems':1,
'nBitsPerMemRow':self.params['nBytesPerQdrSample']*8,
'earlierSampleIsMsb':True}
memVals = self.formatWaveForMem(**formatWaveparams)
#time.sleep(.1)
allMemVals.append(memVals)
#time.sleep(5)
if self.verbose: print "\twriting QDR for Stream",iMem
writeQDRparams={'memName':memNames[iMem],
'valuesToWrite':memVals[:,0],
'start':0,
'bQdrFlip':True,
'nQdrRows':self.params['nQdrRows']}
self.writeQdr(**writeQDRparams)
time.sleep(.1)
self.fpga.write_int(self.params['read_dds_reg'],1)
if self.verbose: print "...Done!"
return allMemVals
def writeBram(self, memName, valuesToWrite, start=0,nBytesPerSample=4):
"""
format values and write them to bram
"""
if nBytesPerSample == 4:
formatChar = 'L'
elif nBytesPerSample == 8:
formatChar = 'Q'
memValues = np.array(valuesToWrite,dtype=np.uint64) #cast signed values
nValues = len(valuesToWrite)
toWriteStr = struct.pack('>{}{}'.format(nValues,formatChar),*memValues)
self.fpga.blindwrite(memName,toWriteStr,start)
def writeQdr(self, memName, valuesToWrite, start=0, bQdrFlip=True, nQdrRows=2**20):
"""
format and write 64 bit values to qdr
NOTE: If you see an error that looks like: WARNING:casperfpga.katcp_fpga:Could not send message '?write qdr0_memory 0 \\0\\0\\0\\0\\0 .....
This may be because the string you are writing is larger than the socket's write buffer size.
You can fix this by adding a monkey patch in casperfpga/casperfpga/katcp_fpga.py
if hasattr(katcp.CallbackClient, 'MAX_WRITE_BUFFER_SIZE'):
setattr(katcp.CallbackClient, 'MAX_WRITE_BUFFER_SIZE', katcp.CallbackClient.MAX_WRITE_BUFFER_SIZE * 10)
Then reinstalling the casperfpga code: python casperfpga/setup.py install
        INPUTS:
            memName - name of the qdr memory to write (e.g. 'qdr0_memory')
            valuesToWrite - array of 64 bit values to write
            start - byte offset at which to start writing
            bQdrFlip - if True, swap the two 32 bit halves of each word so they
                       appear in the correct order in firmware (see note above)
            nQdrRows - number of rows in the qdr memory
"""
nBytesPerSample = 8
formatChar = 'Q'
memValues = np.array(valuesToWrite,dtype=np.uint64) #cast signed values
nValues = len(valuesToWrite)
if bQdrFlip: #For some reason, on Roach2 with the current qdr calibration, the 64 bit word seen in firmware
#has the first and second 32 bit chunks swapped compared to the 64 bit word sent by katcp, so to accommodate
#we swap those chunks here, so they will be in the right order in firmware
mask32 = int('1'*32,2)
memValues = (memValues >> 32)+((memValues & mask32) << 32)
#Unfortunately, with the current qdr calibration, the addresses in katcp and firmware are shifted (rolled) relative to each other
#so to compensate we roll the values to write here
memValues = np.roll(memValues,-1)
toWriteStr = struct.pack('>{}{}'.format(nValues,formatChar),*memValues)
self.fpga.blindwrite(memName,toWriteStr,start)
def formatWaveForMem(self, iVals, qVals, nBitsPerSamplePair=32, nSamplesPerCycle=4096, nMems=3, nBitsPerMemRow=64, earlierSampleIsMsb=False):
"""
put together IQ values from tones to be loaded to a firmware memory LUT
INPUTS:
iVals - time series of I values
qVals -
"""
nBitsPerSampleComponent = nBitsPerSamplePair / 2
#I vals and Q vals are 12 bits, combine them into 24 bit vals
iqVals = (iVals << nBitsPerSampleComponent) + qVals
iqRows = np.reshape(iqVals,(-1,nSamplesPerCycle))
#we need to set dtype to object to use python's native long type
colBitShifts = nBitsPerSamplePair*(np.arange(nSamplesPerCycle,dtype=object))
if earlierSampleIsMsb:
#reverse order so earlier (more left) columns are shifted to more significant bits
colBitShifts = colBitShifts[::-1]
iqRowVals = np.sum(iqRows<<colBitShifts,axis=1) #shift each col by specified amount, and sum each row
#Now we have 2**20 row values, each is 192 bits and contain 8 IQ pairs
#next we divide these 192 bit rows into three 64-bit qdr rows
#Mem0 has the most significant bits
memRowBitmask = int('1'*nBitsPerMemRow,2)
memMaskShifts = nBitsPerMemRow*np.arange(nMems,dtype=object)[::-1]
#now do bitwise_and each value with the mask, and shift back down
memRowVals = (iqRowVals[:,np.newaxis] >> memMaskShifts) & memRowBitmask
#now each column contains the 64-bit qdr values to be sent to a particular qdr
return memRowVals
def loadDacLUT(self, combDict=None):
"""
Sends frequency comb to V7 over UART, where it is loaded
into a lookup table
Call generateDacComb() first
INPUTS:
combDict - return value from generateDacComb(). If None, it trys to gather information from attributes
"""
if combDict is None:
try:
combDict = {'I':np.real(self.dacFreqComb).astype(np.int), 'Q':np.imag(self.dacFreqComb).astype(np.int), 'quantizedFreqList':self.dacQuantizedFreqList}
except AttributeError:
print "Run generateDacComb() first!"
raise
#Format comb for onboard memory
#Interweave I and Q arrays
memVals = np.empty(combDict['I'].size + combDict['Q'].size)
memVals[0::2]=combDict['Q']
memVals[1::2]=combDict['I']
if self.debug:
np.savetxt(self.params['debugDir']+'dacFreqs.txt', combDict['quantizedFreqList']/10**6., fmt='%3.11f', header="Array of DAC frequencies [MHz]")
#Write data to LUTs
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
self.v7_ready = 0
self.fpga.write_int(self.params['inByteUART_reg'],self.params['mbRecvDACLUT'])
time.sleep(0.01)
self.fpga.write_int(self.params['txEnUART_reg'],1)
time.sleep(0.01)
self.fpga.write_int(self.params['txEnUART_reg'],0)
time.sleep(0.01)
#time.sleep(10)
self.fpga.write_int(self.params['enBRAMDump_reg'],1)
#print 'v7 ready before dump: ' + str(self.fpga.read_int(self.params['v7Ready_reg']))
        num_lut_dumps = int(math.ceil(len(memVals)*2./self.lut_dump_buffer_size)) #Each value in memVals is 2 bytes
if self.verbose:
print 'num lut dumps ' + str(num_lut_dumps)
#print 'len(memVals) ' + str(len(memVals))
sending_data = 1 #indicates that ROACH2 is still sending LUT
for i in range(num_lut_dumps):
if(len(memVals)>self.lut_dump_buffer_size/2*(i+1)):
iqList = memVals[self.lut_dump_buffer_size/2*i:self.lut_dump_buffer_size/2*(i+1)]
else:
iqList = memVals[self.lut_dump_buffer_size/2*i:len(memVals)]
iqList = iqList.astype(np.int16)
toWriteStr = struct.pack('<{}{}'.format(len(iqList), 'h'), *iqList)
if self.verbose:
#print 'To Write Str Length: ', str(len(toWriteStr))
#print iqList.dtype
#print iqList
print 'bram dump # ' + str(i)
while(sending_data):
sending_data = self.fpga.read_int(self.params['lutDumpBusy_reg'])
self.fpga.blindwrite(self.params['lutBramAddr_reg'],toWriteStr,0)
time.sleep(0.01)
self.fpga.write_int(self.params['lutBufferSize_reg'],len(toWriteStr))
time.sleep(0.01)
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
self.fpga.write_int(self.params['txEnUART_reg'],1)
#print 'enable write'
time.sleep(0.05)
self.fpga.write_int(self.params['txEnUART_reg'],0)
sending_data = 1
self.v7_ready = 0
self.fpga.write_int(self.params['enBRAMDump_reg'],0)
def setLOFreq(self,LOFreq):
"""
Sets the attribute LOFreq (in Hz)
"""
self.LOFreq = LOFreq
def loadLOFreq(self,LOFreq=None):
"""
Send LO frequency to V7 over UART.
Must initialize LO first.
INPUTS:
LOFreq - LO frequency in MHz
Sends LO freq one byte at a time, LSB first
sends integer bytes first, then fractional
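        Worked example (illustrative numbers): for LOFreq = 5000.25 MHz,
        loFreqInt = 5000 = 0x1388 so the integer bytes sent are 0x88 then 0x13;
        loFreqFrac = 0.25 * 2**16 = 16384 = 0x4000 so the fractional bytes sent
        are 0x00 then 0x40.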
"""
if LOFreq is None:
try:
LOFreq = self.LOFreq/1e6 #IF board uses MHz
except AttributeError:
print "Run setLOFreq() first!"
raise
loFreqInt = int(LOFreq)
loFreqFrac = LOFreq - loFreqInt
# Put V7 into LO recv mode
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
self.v7_ready = 0
self.fpga.write_int(self.params['inByteUART_reg'],self.params['mbRecvLO'])
time.sleep(0.01)
self.fpga.write_int(self.params['txEnUART_reg'],1)
time.sleep(0.01)
self.fpga.write_int(self.params['txEnUART_reg'],0)
for i in range(2):
transferByte = (loFreqInt>>(i*8))&255 #takes an 8-bit "slice" of loFreqInt
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
self.v7_ready = 0
self.fpga.write_int(self.params['inByteUART_reg'],transferByte)
time.sleep(0.01)
self.fpga.write_int(self.params['txEnUART_reg'],1)
time.sleep(0.001)
self.fpga.write_int(self.params['txEnUART_reg'],0)
#print 'loFreqFrac' + str(loFreqFrac)
loFreqFrac = int(loFreqFrac*(2**16))
#print 'loFreqFrac' + str(loFreqFrac)
# same as transfer of int bytes
for i in range(2):
transferByte = (loFreqFrac>>(i*8))&255
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
self.v7_ready = 0
self.fpga.write_int(self.params['inByteUART_reg'],transferByte)
time.sleep(0.01)
self.fpga.write_int(self.params['txEnUART_reg'],1)
time.sleep(0.001)
self.fpga.write_int(self.params['txEnUART_reg'],0)
while(not(self.v7_ready)): # Wait for V7 to say it's done setting LO
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
def setAdcScale(self, scale=.25):
"""
Change the scale factor applied to adc data values before
sending to fft, to hopefully avoid overflowing the fft.
There are 4 bits in the scale with 4 bits after the binary point
(as of darkquad17_2016_Jul_17_2216).
INPUTS:
scale - scale factor applied to all ADC values. Between 0 and 0.9375, in increments of 0.0625
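        Example (illustrative): with 4 bits after the binary point,
        setAdcScale(0.25) writes the integer 4 (i.e. 0.25 * 2**4) to the
        adc scale register.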
"""
scaleInt = scale*(2**self.params['adcScaleBinPt'])
scaleInt = int(scaleInt)
if self.verbose:
print 'setting adc scale to',(scaleInt / 2.**self.params['adcScaleBinPt'])
self.fpga.write_int(self.params['adcScale_reg'],scaleInt)
def changeAtten(self, attenID, attenVal):
"""
Change the attenuation on IF Board attenuators
Must initialize attenuator SPI connection first
INPUTS:
attenID
1 - RF Upconverter path
2 - RF Upconverter path
3 - RF Downconverter path
attenVal - attenuation between 0 and 31.75 dB. Must be multiple of 0.25 dB
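        Example (illustrative): changeAtten(3, 10.25) targets the RF
        downconverter attenuator and sends the register value 41 (4 x 10.25 dB)
        over UART.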
"""
if attenVal > 31.75 or attenVal<0:
raise ValueError("Attenuation must be between 0 and 31.75")
attenVal = int(np.round(attenVal*4)) #attenVal register holds value 4x(attenuation)
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
self.v7_ready = 0
self.sendUARTCommand(self.params['mbChangeAtten'])
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
self.v7_ready = 0
self.sendUARTCommand(attenID)
while(not(self.v7_ready)):
self.v7_ready = self.fpga.read_int(self.params['v7Ready_reg'])
self.v7_ready = 0
self.sendUARTCommand(attenVal)
def generateDacComb(self, freqList=None, resAttenList=None, globalDacAtten = 0, phaseList=None):
"""
Creates DAC frequency comb by adding many complex frequencies together with specified amplitudes and phases.
        The resAttenList holds the absolute attenuation for each resonator signal coming out of the DAC.
        Zero attenuation means that the tone amplitude is set to the full dynamic range of the DAC and the
        DAC attenuator(s) are set to 0. Thus, all values in resAttenList must be at least as large as
        globalDacAtten. If you decrease globalDacAtten, the amplitude in the DAC LUT decreases so that the
        total attenuation of the signal stays the same.
Note: Usually the attenuation values are integer dB values but technically the DAC attenuators can be set to every 1/4 dB and the amplitude in the DAC LUT can have arbitrary attenuation (quantized by number of bits).
INPUTS:
freqList - list of all resonator frequencies. If None, use self.freqList
resAttenList - list of absolute attenuation values (dB) for each resonator. If None, use 20's
globalDacAtten - global attenuation for the entire DAC. Sum of the two DAC attenuators on the IF board
phaseList - list of phases for each complex signal. If None, generates random phases. The previous phase list is kept in self.dacPhaseList
OUTPUTS:
dictionary with keywords
I - I(t) values for frequency comb [signed 32-bit integers]
Q - Q(t)
quantizedFreqList - list of frequencies after digital quantization
"""
# Interpret Inputs
if freqList is None:
freqList=self.freqList
if len(freqList)>self.params['nChannels']:
warnings.warn("Too many freqs provided. Can only accommodate "+str(self.params['nChannels'])+" resonators")
freqList = freqList[:self.params['nChannels']]
freqList = np.ravel(freqList).flatten()
if resAttenList is None:
try: resAttenList = self.attenList
except AttributeError:
warnings.warn("Individual resonator attenuations assumed to be 20")
resAttenList=np.zeros(len(freqList))+20
if len(resAttenList)>self.params['nChannels']:
warnings.warn("Too many attenuations provided. Can only accommodate "+str(self.params['nChannels'])+" resonators")
resAttenList = resAttenList[:self.params['nChannels']]
resAttenList = np.ravel(resAttenList).flatten()
if len(freqList) != len(resAttenList):
raise ValueError("Need exactly one attenuation value for each resonant frequency!")
if (phaseList is not None) and len(freqList) != len(phaseList):
raise ValueError("Need exactly one phase value for each resonant frequency!")
if np.any(resAttenList < globalDacAtten):
raise ValueError("Impossible to attain desired resonator attenuations! Decrease the global DAC attenuation.")
self.attenList = resAttenList
self.freqList = freqList
if self.verbose:
print 'Generating DAC comb...'
# Calculate relative amplitudes for DAC LUT
nBitsPerSampleComponent = self.params['nBitsPerSamplePair']/2
maxAmp = int(np.round(2**(nBitsPerSampleComponent - 1)-1)) # 1 bit for sign
amplitudeList = maxAmp*10**(-(resAttenList - globalDacAtten)/20.)
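# example with hypothetical values: resAtten=30 dB and globalDacAtten=10 dB give a relative
# attenuation of 20 dB, i.e. an amplitude of maxAmp*10**(-20/20.) = maxAmp/10.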
# Calculate nSamples and sampleRate
nSamples = self.params['nDacSamplesPerCycle']*self.params['nLutRowsToUse']
sampleRate = self.params['dacSampleRate']
# Calculate resonator frequencies for DAC
if not hasattr(self,'LOFreq'):
raise ValueError("Need to set LO freq by calling setLOFreq()")
dacFreqList = self.freqList-self.LOFreq
dacFreqList[np.where(dacFreqList<0.)] += self.params['dacSampleRate'] #For +/- freq
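# tones below the LO come out negative after subtracting the LO; adding dacSampleRate maps them to their positive alias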
# Generate and add up individual tone time series.
toneDict = self.generateTones(dacFreqList, nSamples, sampleRate, amplitudeList, phaseList)
self.dacQuantizedFreqList = toneDict['quantizedFreqList']
self.dacPhaseList = toneDict['phaseList']
iValues = np.array(np.round(np.sum(toneDict['I'],axis=0)),dtype=np.int)
qValues = np.array(np.round(np.sum(toneDict['Q'],axis=0)),dtype=np.int)
self.dacFreqComb = iValues + 1j*qValues
# check that we are utilizing the dynamic range of the DAC correctly
highestVal = np.max((np.abs(iValues).max(),np.abs(qValues).max()))
expectedHighestVal_sig = scipy.special.erfinv((len(iValues)-0.1)/len(iValues))*np.sqrt(2.) # 10% of the time there should be a point this many sigmas higher than average
if highestVal > expectedHighestVal_sig*np.max((np.std(iValues),np.std(qValues))):
warnings.warn("The freq comb's relative phases may have added up sub-optimally. You should calculate new random phases")
if highestVal > maxAmp:
dBexcess = int(np.ceil(20.*np.log10(1.0*highestVal/maxAmp)))
raise ValueError("Not enough dynamic range in DAC! Try decreasing the global DAC Attenuator by "+str(dBexcess)+' dB')
elif 1.0*maxAmp/highestVal > 10**((1)/20.):
# the highest DAC amplitude is more than 1 dB below the max allowed by the dynamic range
warnings.warn("DAC Dynamic range not fully utilized. Increase global attenuation by: "+str(int(np.floor(20.*np.log10(1.0*maxAmp/highestVal))))+' dB')
if self.verbose:
print '\tUsing '+str(1.0*highestVal/maxAmp*100)+' percent of DAC dynamic range'
print '\thighest: '+str(highestVal)+' out of '+str(maxAmp)
print '\tsigma_I: '+str(np.std(iValues))+' sigma_Q: '+str(np.std(qValues))
print '\tLargest val_I: '+str(1.0*np.abs(iValues).max()/np.std(iValues))+' sigma. Largest val_Q: '+str(1.0*np.abs(qValues).max()/np.std(qValues))+' sigma.'
print '\tExpected val: '+str(expectedHighestVal_sig)+' sigmas'
#print '\n\tDac freq list: '+str(self.dacQuantizedFreqList)
#print '\tDac Q vals: '+str(qValues)
#print '\tDac I vals: '+str(iValues)
print '...Done!'
'''
if self.debug:
plt.figure()
plt.plot(iValues)
plt.plot(qValues)
std_i = np.std(iValues)
std_q = np.std(qValues)
plt.axhline(y=std_i,color='k')
plt.axhline(y=2*std_i,color='k')
plt.axhline(y=3*std_i,color='k')
plt.axhline(y=expectedHighestVal_sig*std_i,color='r')
plt.axhline(y=expectedHighestVal_sig*std_q,color='r')
plt.figure()
plt.hist(iValues,1000)
plt.hist(qValues,1000)
x_gauss = np.arange(-maxAmp,maxAmp,maxAmp/2000.)
i_gauss = len(iValues)/(std_i*np.sqrt(2.*np.pi))*np.exp(-x_gauss**2/(2.*std_i**2.))
q_gauss = len(qValues)/(std_q*np.sqrt(2.*np.pi))*np.exp(-x_gauss**2/(2.*std_q**2.))
plt.plot(x_gauss,i_gauss)
plt.plot(x_gauss,q_gauss)
plt.axvline(x=std_i,color='k')
plt.axvline(x=2*std_i,color='k')
plt.axvline(x=3*std_i,color='k')
plt.axvline(x=expectedHighestVal_sig*std_i,color='r')
plt.axvline(x=expectedHighestVal_sig*std_q,color='r')
plt.figure()
sig = np.fft.fft(self.dacFreqComb)
sig_freq = np.fft.fftfreq(len(self.dacFreqComb),1./self.params['dacSampleRate'])
plt.plot(sig_freq, np.real(sig),'b')
plt.plot(sig_freq, np.imag(sig),'g')
for f in self.dacQuantizedFreqList:
x_f=f
if f > self.params['dacSampleRate']/2.:
x_f=f-self.params['dacSampleRate']
plt.axvline(x=x_f, ymin=np.amin(np.real(sig)), ymax = np.amax(np.real(sig)), color='r')
#plt.show()
'''
return {'I':iValues,'Q':qValues,'quantizedFreqList':self.dacQuantizedFreqList}
def generateTones(self, freqList, nSamples, sampleRate, amplitudeList, phaseList):
"""
Generate a list of complex signals with amplitudes and phases specified and frequencies quantized
INPUTS:
freqList - list of resonator frequencies
nSamples - Number of time samples
sampleRate - Used to quantize the frequencies
amplitudeList - list of amplitudes. If None, use 1.
phaseList - list of phases. If None, use random phase
OUTPUTS:
dictionary with keywords
I - each element is a list of I(t) values for specific freq
Q - Q(t)
quantizedFreqList - list of frequencies after digital quantization
phaseList - list of phases for each frequency
"""
if amplitudeList is None:
amplitudeList = np.asarray([1.]*len(freqList))
if phaseList is None:
phaseList = np.random.uniform(0,2.*np.pi,len(freqList))
if len(freqList) != len(amplitudeList) or len(freqList) != len(phaseList):
raise ValueError("Need exactly one phase and amplitude value for each resonant frequency!")
# Quantize the frequencies to their closest digital value
freqResolution = sampleRate/nSamples
quantizedFreqList = np.round(freqList/freqResolution)*freqResolution
# generate each signal
iValList = []
qValList = []
dt = 1. / sampleRate
t = dt*np.arange(nSamples)
for i in range(len(quantizedFreqList)):
phi = 2.*np.pi*quantizedFreqList[i]*t
expValues = amplitudeList[i]*np.exp(1.j*(phi+phaseList[i]))
#print 'Rotating ch'+str(i)+' to '+str(phaseList[i]*180./np.pi)+' deg'
iValList.append(np.real(expValues))
qValList.append(np.imag(expValues))
'''
if self.debug:
plt.figure()
for i in range(len(quantizedFreqList)):
plt.plot(iValList[i])
plt.plot(qValList[i])
#plt.show()
'''
return {'I':np.asarray(iValList),'Q':np.asarray(qValList),'quantizedFreqList':quantizedFreqList,'phaseList':phaseList}
def generateResonatorChannels(self, freqList,order='F'):
"""
Algorithm for deciding which resonator frequencies are assigned to which stream and channel number.
This is used to define the dds LUTs and calculate the fftBin index for each freq to set the appropriate chan_sel block
Try to evenly distribute the given frequencies into each stream (unless you use order='stream')
INPUTS:
freqList - list of resonator frequencies (Assumed sorted but technically doesn't need to be)
If it's not unique there could be problems later on.
order - 'F' places sequential frequencies into a single stream but forces an even distribution among streams
'C' places sequential frequencies into the same channel number but forces an even distribution among streams
'stream' sequentially fills stream 0 first, then stream 1, etc...
OUTPUTS:
self.freqChannels - Each column contains the resonator frequencies in a single stream.
The row index is the channel number.
It's padded with -1's.
"""
#Interpret inputs...
if order not in ['F','C','A','stream']: #if invalid, grab default value
args,__,__,defaults = inspect.getargspec(Roach2Controls.generateResonatorChannels)
order = defaults[args.index('order')-len(args)]
if self.verbose: print "Invalid 'order' parameter for generateResonatorChannels(). Changed to default: "+str(order)
if len(np.array(freqList))>self.params['nChannels']:
warnings.warn("Too many freqs provided. Can only accommodate "+str(self.params['nChannels'])+" resonators")
freqList = freqList[:self.params['nChannels']]
self.freqList = np.unique(np.ravel(freqList))
if len(np.unique(self.freqList)) != len(self.freqList):
warnings.warn("Be careful, I assumed everywhere that the frequencies were unique!")
self.freqChannels = self.freqList
if self.verbose:
print 'Generating Resonator Channels...'
#Pad with freq = -1 so that freqChannels's length is a multiple of nStreams
nStreams = int(self.params['nChannels']/self.params['nChannelsPerStream']) #number of processing streams. For Gen 2 readout this should be 4
padValue = self.freqPadValue #pad with freq=-1
if order == 'F':
padNum = (nStreams - (len(self.freqChannels) % nStreams))%nStreams # number of empty elements to pad
for i in range(padNum):
ind = len(self.freqChannels)-i*np.ceil(len(self.freqChannels)*1.0/nStreams)
self.freqChannels=np.insert(self.freqChannels,int(ind),padValue)
elif order == 'C' or order == 'A':
padNum = (nStreams - (len(self.freqChannels) % nStreams))%nStreams # number of empty elements to pad
self.freqChannels = np.append(self.freqChannels, [padValue]*(padNum))
elif order == 'stream':
nFreqs = len(self.freqList)
if nFreqs < self.params['nChannelsPerStream']:
padNum = nFreqs * (nStreams-1)
else:
padNum = self.params['nChannels'] - nFreqs
self.freqChannels=np.append(self.freqChannels,[padValue]*padNum)
order = 'F'
#Split up to assign channel numbers
self.freqChannels = np.reshape(self.freqChannels,(-1,nStreams),order)
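# e.g. assuming the Gen 2 configuration of 4 streams x 256 channels mentioned elsewhere in this file, freqChannels ends up with shape (256, 4)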
# Make indexer arrays
self.freqChannelToStreamChannel=np.zeros((len(self.freqList),2),dtype=np.int)
self.streamChannelToFreqChannel=np.zeros(self.freqChannels.shape,dtype=np.int)-1
for i in range(len(self.freqList)):
ch_i,stream_i = np.where(self.freqChannels==self.freqList[i])
self.freqChannelToStreamChannel[i] = np.asarray([int(ch_i),int(stream_i)])
self.streamChannelToFreqChannel[ch_i,stream_i]=i
if self.verbose:
print '\tFreq Channels: ',self.freqChannels
print '...Done!'
return self.freqChannels
def generateFftChanSelection(self,freqChannels=None):
'''
This calculates the fftBin index for each resonant frequency and arranges them by stream and channel.
Used by channel selector block
Call setLOFreq() and generateResonatorChannels() first.
INPUTS (optional):
freqChannels - 2D array of frequencies where each column is a stream and each row is a channel. If freqChannels isn't given then try to grab it from the attribute.
OUTPUTS:
self.fftBinIndChannels - Array with each column containing the fftbin index of a single stream. The row index is the channel number
'''
if freqChannels is None:
try:
freqChannels = self.freqChannels
except AttributeError:
print "Run generateResonatorChannels() first!"
raise
freqChannels = np.asarray(freqChannels)
if self.verbose:
print "Finding FFT Bins..."
#The frequencies seen by the fft block are actually from the DAC, up/down converted by the IF board, and then digitized by the ADC
dacFreqChannels = (freqChannels-self.LOFreq)
dacFreqChannels[np.where(dacFreqChannels<0)]+=self.params['dacSampleRate']
freqResolution = self.params['dacSampleRate']/(self.params['nDacSamplesPerCycle']*self.params['nLutRowsToUse'])
dacQuantizedFreqChannels = np.round(dacFreqChannels/freqResolution)*freqResolution
#calculate fftbin index for each freq
binSpacing = self.params['dacSampleRate']/self.params['nFftBins']
genBinIndex = dacQuantizedFreqChannels/binSpacing
self.fftBinIndChannels = np.round(genBinIndex)
self.fftBinIndChannels[np.where(freqChannels<0)]=self.fftBinPadValue # empty channels have freq=-1; assign them the fftBin pad value
self.fftBinIndChannels = self.fftBinIndChannels.astype(np.int)
if self.verbose:
print '\tfft bin indices: ',self.fftBinIndChannels
print '...Done!'
return self.fftBinIndChannels
def loadChanSelection(self,fftBinIndChannels=None):
"""
Loads fftBin indices to all channels (in each stream), to configure chan_sel block in firmware on self.fpga
Call generateFftChanSelection() first
INPUTS (optional):
fftBinIndChannels - Array with each column containing the fftbin index of a single stream. The row is the channel number
"""
if fftBinIndChannels is None:
try:
fftBinIndChannels = self.fftBinIndChannels
except AttributeError:
print "Run generateFftChanSelection() first!"
raise
nStreams = self.params['nChannels']/self.params['nChannelsPerStream']
if self.verbose: print 'Configuring chan_sel block...\n\tCh: Stream'+str(range(len(fftBinIndChannels[0])))
for row in range(self.params['nChannelsPerStream']):
try:
fftBinInds = fftBinIndChannels[row]
except IndexError:
fftBinInds = np.asarray([self.fftBinPadValue]*nStreams)
self.loadSingleChanSelection(selBinNums=fftBinInds,chanNum=row)
#for row in range(len(fftBinIndChannels)):
# if row > self.params['nChannelsPerStream']:
# warnings.warn("Too many freqs provided. Can only accommodate "+str(self.params['nChannels'])+" resonators")
# break
# self.loadSingleChanSelection(selBinNums=fftBinIndChannels[row],chanNum=row)
if self.verbose: print '...Done!'
if self.debug:
np.savetxt(self.params['debugDir']+'freqChannels.txt', self.freqChannels/10**9.,fmt='%2.25f',header="2D Array of MKID frequencies [GHz]. \nEach column represents a stream and each row is a channel")
np.savetxt(self.params['debugDir']+'fftBinIndChannels.txt', self.fftBinIndChannels,fmt='%8i',header="2D Array of fftBin Indices. \nEach column represents a stream and each row is a channel")
def loadSingleChanSelection(self,selBinNums,chanNum=0):
"""
Assigns bin numbers to a single channel (in each stream), to configure chan_sel block
Used by loadChanSelection()
INPUTS:
selBinNums: array of bin numbers (for each stream) to be assigned to chanNum (4 element int array for Gen 2 firmware)
chanNum: the channel number to be assigned
"""
nStreams = int(self.params['nChannels']/self.params['nChannelsPerStream']) #number of processing streams. For Gen 2 readout this should be 4
if selBinNums is None or len(selBinNums) != nStreams:
raise TypeError,'selBinNums must have number of elements matching number of streams in firmware'
self.fpga.write_int(self.params['chanSelLoad_reg'],0) #set to zero so nothing loads while we set other registers.
#assign the bin number to be loaded to each stream
for i in range(nStreams):
self.fpga.write_int(self.params['chanSel_regs'][i],selBinNums[i])
time.sleep(.001)
#in the register chan_sel_load, the lsb initiates the loading of the above bin numbers into memory
#the 8 bits above the lsb indicate which channel is being loaded (for all streams)
loadVal = (chanNum << 1) + 1
self.fpga.write_int(self.params['chanSelLoad_reg'],loadVal)
time.sleep(.001) #give it a chance to load
self.fpga.write_int(self.params['chanSelLoad_reg'],0) #stop loading
if self.verbose: print '\t'+str(chanNum)+': '+str(selBinNums)
#def freqChannelToStreamChannel(self, freqCh=None):
def getStreamChannelFromFreqChannel(self,freqCh=None):
'''
This function converts a channel indexed by the location in the freqlist
to a stream/channel in the Firmware
Throws attribute error if self.freqList or self.freqChannels don't exist
Call self.generateResonatorChannels() first
INPUTS:
freqCh - index or list of indices corresponding to the resonators location in the freqList
OUTPUTS:
ch - list of channel numbers for resonators in firmware
stream - stream(s) corresponding to ch
'''
if freqCh is None:
freqCh = range(len(self.freqList))
channels = np.atleast_2d(self.freqChannelToStreamChannel[freqCh])[:,0]
streams = np.atleast_2d(self.freqChannelToStreamChannel[freqCh])[:,1]
return channels, streams
#ch, stream = np.where(np.in1d(self.freqChannels,np.asarray(self.freqList)[freqCh]).reshape(self.freqChannels.shape))
#ch=[]
#stream=[]
#for i in np.atleast_1d(freqCh):
# ch_i, stream_i = np.where(np.in1d(self.freqChannels,np.asarray(self.freqList)[i]).reshape(self.freqChannels.shape))
# ch.append(ch_i)
# stream.append(stream_i)
#return np.atleast_1d(ch), np.atleast_1d(stream)
#def streamChannelToFreqChannel(self, ch, stream):
def getFreqChannelFromStreamChannel(self, ch, stream):
'''
This function converts a stream/ch index from the firmware
to a channel indexed by the location in the freqList
Throws attribute error if self.freqList or self.freqChannels don't exist
Call self.generateResonatorChannels() first
INPUTS:
ch - value or list of channel numbers for resonators in firmware
stream - stream(s) corresponding to ch
OUTPUTS:
channel - list of indices corresponding to the resonator's location in the freqList
'''
freqCh = self.streamChannelToFreqChannel[ch,stream]
#if np.any(freqCh==-1):
# raise ValueError('No freq channel exists for ch/stream:',ch[np.where(freqCh==-1)],'/',stream[np.where(freqCh==-1)]
return freqCh
#freqChannels = []
#for i in range(len(np.atleast_1d(ch))):
# freqCh_i = np.where(np.in1d(self.freqList,np.asarray(self.freqChannels)[np.atleast_1d(ch)[i],np.atleast_1d(stream)[i]]))[0]
# freqChannels.append(freqCh_i)
#return np.atleast_1d(freqChannels)
#return np.where(np.in1d(self.freqList,np.asarray(self.freqChannels)[ch,stream]))[0]
def setMaxCountRate(self, cpsLimit = 2500):
for reg in self.params['captureCPSlim_regs']:
try:
self.fpga.write_int(reg,cpsLimit)
except:
print "Couldn't write to", reg
def setThreshByFreqChannel(self,thresholdRad = -.1, freqChannel=0):
"""
Overloads setThresh but using channel as indexed by the freqList
INPUTS:
thresholdRad: The threshold in radians. The phase must drop below this value to trigger a photon event
freqChannel - channel as indexed by the freqList
"""
#ch, stream = self.freqChannelToStreamChannel(freqChannel)
ch, stream = self.getStreamChannelFromFreqChannel(freqChannel)
self.setThresh(thresholdRad = thresholdRad,ch=int(ch), stream=int(stream))
def setThresh(self,thresholdRad = -.1,ch=0,stream=0):
"""Sets the phase threshold and baseline filter for photon pulse detection triggers in each channel
INPUTS:
thresholdRad: The threshold in radians. The phase must drop below this value to trigger a photon event
ch - the channel number in the stream
stream - the stream number
"""
#convert deg to radians
#thresholdRad = thresholdDeg * np.pi/180.
#format it as a fix16_13 to be placed in a register
binThreshold = castBin(thresholdRad,quantization='Round',nBits=16,binaryPoint=13,format='uint')
sampleRate = 1.e6
#for the baseline, we apply a second order state variable low pass filter to the phase
#See http://www.earlevel.com/main/2003/03/02/the-digital-state-variable-filter/
#The filter takes two parameters based on the desired Q factor and cutoff frequency
criticalFreq = 200 #Hz
Q=.7
baseKf=2*np.sin(np.pi*criticalFreq/sampleRate)
baseKq=1./Q
#format these paramters as fix18_16 values to be loaded to registers
binBaseKf=castBin(baseKf,quantization='Round',nBits=18,binaryPoint=16,format='uint')
binBaseKq=castBin(baseKq,quantization='Round',nBits=18,binaryPoint=16,format='uint')
if self.verbose:
print 'threshold',thresholdRad,binThreshold
print 'Kf:',baseKf,binBaseKf
print 'Kq:',baseKq,binBaseKq
#load the values in
self.fpga.write_int(self.params['captureBasekf_regs'][stream],binBaseKf)
self.fpga.write_int(self.params['captureBasekq_regs'][stream],binBaseKq)
self.fpga.write_int(self.params['captureThreshold_regs'][stream],binThreshold)
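# in the load register, bit 0 triggers the load and the bits above it select the channel (presumably the same scheme as the chan_sel load register)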
self.fpga.write_int(self.params['captureLoadThreshold_regs'][stream],1+(ch<<1))
time.sleep(.003) # Each snapshot should take 2 msec of phase data
self.fpga.write_int(self.params['captureLoadThreshold_regs'][stream],0)
def loadFIRCoeffs(self,coeffFile):
'''
This function loads the FIR coefficients into the Firmware's phase filter for every resonator
You can provide a filter for each resonator channel or just a single filter that's applied to each resonator
Any channels without resonators have their filter taps set to 0
If self.freqList and self.freqChannels don't exist then it loads FIR coefficients into every channel
Be careful, depending on how you set up the channel selection block you might assign the wrong filters to the resonators
(see self.generateResonatorChannels() for making self.freqList, self.freqChannels)
INPUTS:
coeffFile - path to plain text file that contains a 2d array
The i'th column corresponds to the i'th resonator in the freqList
If there is only one column then use it for every resonator in the freqList
The j'th row is the filter's coefficient for the j'th tap
'''
# Decide which channels to write FIRs to
try:
freqChans = range(len(self.freqList))
#channels, streams = self.freqChannelToStreamChannel(freqChans) # Need to be careful about how the resonators are distributed into firmware streams
channels, streams = self.getStreamChannelFromFreqChannel(freqChans)
except AttributeError: # If we haven't loaded in frequencies yet then load FIRs into all channels
freqChans = range(self.params['nChannels'])
streams = np.repeat(range(self.params['nChannels']/self.params['nChannelsPerStream']), self.params['nChannelsPerStream'])
channels = np.tile(range(self.params['nChannelsPerStream']), self.params['nChannels']/self.params['nChannelsPerStream'])
# grab FIR coeff from file
firCoeffs = np.transpose(np.loadtxt(coeffFile))
if firCoeffs.ndim==1: firCoeffs = np.tile(firCoeffs, (len(freqChans),1)) # if using the same filter for every pixel
firBinPt=self.params['firBinPt']
firInts=np.asarray(firCoeffs*(2**firBinPt),dtype=np.int32)
zeroWriteStr = struct.pack('>{}{}'.format(len(firInts[0]),'l'), *np.zeros(len(firInts[0]))) # write zeros for channels without resonators
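# struct format '>{N}l' packs the taps as big-endian signed 32-bit integers (fixed point with firBinPt fractional bits)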
# loop through and write FIRs to firmware
nStreams = self.params['nChannels']/self.params['nChannelsPerStream']
for stream in range(nStreams):
try:
self.fpga.write_int(self.params['firLoadChan_regs'][stream],0) #just double check that this is at 0
ch_inds = np.where(streams==stream) # indices in list of resonator channels that correspond to this stream
ch_stream = np.atleast_1d(channels)[ch_inds] # list of the stream channels with this stream
ch_freqs = np.atleast_1d(freqChans)[ch_inds] # list of freq channels with this stream
for ch in range(self.params['nChannelsPerStream']):
if ch in np.atleast_1d(ch_stream):
ch_freq = int(np.atleast_1d(ch_freqs)[np.where(np.atleast_1d(ch_stream)==ch)]) # The freq channel of the resonator corresponding to ch/stream
toWriteStr = struct.pack('>{}{}'.format(len(firInts[ch_freq]),'l'), *firInts[ch_freq])
print ' ch:'+str(ch_freq)+' ch/stream: '+str(ch)+'/'+str(stream)
else:
toWriteStr=zeroWriteStr
self.fpga.blindwrite(self.params['firTapsMem_regs'][stream], toWriteStr,0)
time.sleep(.001) # 1ms is more than enough. Should only take nTaps/fpgaClockRate seconds to load in
loadVal = (1<<8)+ch # first bit indicates we will write, next 8 bits is the chan number for the stream
self.fpga.write_int(self.params['firLoadChan_regs'][stream],loadVal)
time.sleep(.001)
self.fpga.write_int(self.params['firLoadChan_regs'][stream],0)
except:
print 'Failed to write FIRs on stream '+str(stream) # Often times test firmware only implements stream 0
if stream==0: raise
def takePhaseSnapshotOfFreqChannel(self, freqChan):
'''
This function overloads takePhaseSnapshot
INPUTS:
freqChan - the resonator channel as indexed in the freqList
'''
#ch, stream = self.freqChannelToStreamChannel(freqChan)
ch, stream = self.getStreamChannelFromFreqChannel(freqChan)
selChanIndex = (int(stream)<<8) + int(ch)
print "Taking phase snap from ch/stream:",str(ch),'/',str(stream),' selChanIndex:',str(selChanIndex)
return self.takePhaseSnapshot(selChanIndex)
def takePhaseSnapshot(self, selChanIndex):
"""
Takes phase data using snapshot block
INPUTS:
selChanIndex: channel to take data from
OUTPUTS:
snapDict with keywords:
phase - list of phases in radians
trig - list of booleans indicating the firmware triggered
time - Number of seconds for each phase point starting at 0 (1 point every 256 clock cycles)
"""
self.fpga.write_int(self.params['phaseSnpCh_reg'],selChanIndex)
self.fpga.snapshots[self.params['phaseSnapshot']].arm(man_valid=False)
time.sleep(.001)
self.fpga.write_int(self.params['phaseSnpTrig_reg'],1)#trigger snapshots
time.sleep(.001) #wait for other trigger conditions to be met
self.fpga.write_int(self.params['phaseSnpTrig_reg'],0)#release trigger
snapDict = self.fpga.snapshots[self.params['phaseSnapshot']].read(timeout=5,arm=False,man_valid=False)['data']
trig = np.roll(snapDict['trig'],-2) #there is an extra 2 cycle delay in firmware between we_out and phase
snapDict['trig']=trig
dt=self.params['nChannelsPerStream']/self.params['fpgaClockRate']
snapDict['time']=dt*np.arange(len(trig))
return snapDict
#def startPhaseStream(self,selChanIndex=0, pktsPerFrame=100, fabric_port=50000, destIPID=50):
def startPhaseStream(self,selChanIndex=0, pktsPerFrame=100, fabric_port=50000, hostIP='10.0.0.50'):
"""initiates streaming of phase timestream (after prog_fir) to the 1Gbit ethernet
INPUTS:
selChanIndex: stream/channel. The first two bits indicate the stream, last 8 bits for the channel
pktsPerFrame: number of 8 byte photon words per ethernet frame
fabric_port
hostIP: destination IP address of the receiving computer (e.g. '10.0.0.50')
"""
dest_ip = binascii.hexlify(socket.inet_aton(hostIP))
dest_ip = int(dest_ip,16)
#dest_ip = 0xa000000 + destIPID
#print dest_ip
#configure the gbe core,
#print 'restarting'
self.fpga.write_int(self.params['destIP_reg'],dest_ip)
self.fpga.write_int(self.params['phasePort_reg'],fabric_port)
self.fpga.write_int(self.params['wordsPerFrame_reg'],pktsPerFrame)
#reset the core to make sure it's in a clean state
self.fpga.write_int(self.params['photonCapStart_reg'],0) #make sure we're not streaming photons
self.fpga.write_int(self.params['phaseDumpEn_reg'],0) #can't send packets when resetting
self.fpga.write_int(self.params['gbe64Rst_reg'],1)
time.sleep(.1)
self.fpga.write_int(self.params['gbe64Rst_reg'],0)
#choose what channel to stream
self.fpga.write_int(self.params['phaseDumpChanSel_reg'],selChanIndex)
#turn it on
self.fpga.write_int(self.params['phaseDumpEn_reg'],1)
def stopStream(self):
"""stops streaming of phase timestream (after prog_fir) to the 1Gbit ethernet
"""
self.fpga.write_int(self.params['phaseDumpEn_reg'],0)
def recvPhaseStream(self, channel=0, duration=60, pktsPerFrame=100, host = '10.0.0.50', port = 50000):
"""
Receives phase timestream data over ethernet. Must call
startPhaseStream first to initiate phase stream.
The data is saved in self.phaseTimeStreamData
INPUTS:
channel - stream/channel. The first two bits indicate the stream, last 8 bits for the channel
channel = 0 means ch 0 on stream 0. channel = 256 means ch 0 on stream 1, etc...
duration - duration (in seconds) of phase stream
host - IP address of computer receiving packets
(represented as a string)
port
OUTPUTS:
self.phaseTimeStreamData - phase packet data. See parsePhaseStream()
"""
#d = datetime.datetime.today()
#filename = ('phase_dump_pixel_' + str(channel) + '_' + str(d.day) + '_' + str(d.month) + '_' +
# str(d.year) + '_' + str(d.hour) + '_' + str(d.minute) + str('.bin'))
if self.verbose:
print 'host ' + host
print 'port ' + str(port)
print 'duration ' + str(duration)
# create dgram udp socket
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error:
print 'Failed to create socket'
raise
# Bind socket to local host and port
try:
sock.bind((host, port))
except socket.error , msg:
print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sock.close()
raise
#print 'Socket bind complete'
bufferSize = int(8*pktsPerFrame) #Each photon word is 8 bytes
iFrame = 0
nFramesLost = 0
lastPack = -1
expectedPackDiff = -1
frameData = ''
#dumpFile = open(filename, 'w')
#self.lastPhaseDumpFile = filename
startTime = time.time()
try:
while (time.time()-startTime) < duration:
frame = sock.recvfrom(bufferSize)
frameData += frame[0]
iFrame += 1
if self.verbose and iFrame%1000==0:
print iFrame
except KeyboardInterrupt:
print 'Exiting'
sock.close()
self.phaseTimeStreamData = frameData
#dumpFile.write(frameData)
#dumpFile.close()
return
#print 'Exiting'
sock.close()
self.phaseTimeStreamData = frameData
#dumpFile.write(frameData)
#dumpFile.close()
return self.phaseTimeStreamData
def takePhaseStreamDataOfFreqChannel(self, freqChan=0, duration=2, pktsPerFrame=100, fabric_port=50000, hostIP='10.0.0.50'):
"""
This function overloads takePhaseStreamData() but uses the channel index corresponding to the freqlist instead of a ch/stream index
INPUTS:
freqChan - which channel to collect phase on. freqChan corresponds to the resonator index in freqList
duration - duration (in seconds) of stream
pktsPerFrame - number of 8 byte photon words per ethernet frame
fabric_port -
hostIP - IP address of computer receiving stream
OUTPUTS:
phases - a list of phases in radians
"""
#ch, stream = self.freqChannelToStreamChannel(freqChan)
ch, stream = self.getStreamChannelFromFreqChannel(freqChan)
selChanIndex = (int(stream) << 8) + int(ch)
return self.takePhaseStreamData(selChanIndex, duration, pktsPerFrame, fabric_port, hostIP)
def takePhaseStreamData(self, selChanIndex=0, duration=2, pktsPerFrame=100, fabric_port=50000, hostIP='10.0.0.50'):
"""
Takes phase timestream data from the specified channel for the specified amount of time
Gets one phase value for every nChannelsPerStream/fpgaClockRate seconds
INPUTS:
selChanIndex - stream/channel. The first two bits indicate the stream, last 8 bits for the channel
channel = 0 means ch 0 on stream 0. channel = 256 means ch 0 on stream 1, etc...
duration - duration (in seconds) of stream
pktsPerFrame - number of 8 byte photon words per ethernet frame
fabric_port -
hostIP - IP address of computer receiving stream
OUTPUTS:
phases - a list of phases in radians
"""
self.startPhaseStream(selChanIndex, pktsPerFrame, fabric_port, hostIP )
if self.verbose:
print "Collecting phase time stream..."
#self.recvPhaseStream(selChanIndex, duration, pktsPerFrame, '10.0.0.'+str(destIPID), fabric_port)
phaseTimeStreamData=self.recvPhaseStream(selChanIndex, duration, pktsPerFrame, hostIP, fabric_port)
self.stopStream()
if self.verbose:
print "...Done!"
return self.parsePhaseStream(phaseTimeStreamData,pktsPerFrame)
#def parsePhaseStream(self, phaseDumpFile=None, pktsPerFrame=100):
def parsePhaseStream(self, phaseTimeStreamData=None, pktsPerFrame=100):
"""
This function parses the packet data from recvPhaseStream()
INPUTS:
phaseTimeStreamData - phase packet data from recvPhaseStream()
pktsPerFrame - number of 8 byte photon words per ethernet frame
OUTPUTS:
phases - a list of phases in radians
"""
#if(phaseDumpFile == None):
# try:
# phaseDumpFile = self.lastPhaseDumpFile
# except AttributeError:
# print 'Specify a file or run takePhaseStreamData()'
#
#with open(phaseDumpFile,'r') as dumpFile:
# data = dumpFile.read()
data = phaseTimeStreamData
if phaseTimeStreamData is None:
data = self.phaseTimeStreamData
nBytes = len(data)
nWords = nBytes/8 #64 bit words
#break into 64 bit words
words = np.array(struct.unpack('>{:d}Q'.format(nWords), data),dtype=object)
#remove headers
headerFirstByte = 0xff
firstBytes = words >> (64-8)
headerIdx = np.where(firstBytes == headerFirstByte)[0]
words = np.delete(words,headerIdx)
nBitsPerPhase = 12
binPtPhase = 9
nPhasesPerWord = 5
#to parse out the 5 12-bit values, we'll shift down the bits we don't want for each value, then apply a bitmask to take out
#bits higher than the 12 we want
#The least significant bits in the word should be the earliest phase, so the first column should have zero bitshift
bitmask = int('1'*nBitsPerPhase,2)
bitshifts = nBitsPerPhase*np.arange(nPhasesPerWord)
#add an axis so we can broadcast
#and shift away the bits we don't keep for each row
#print np.shape(words[:,np.newaxis]),words.dtype
#print bitshifts
#print words[0:10]
phases = (words[:,np.newaxis]) >> bitshifts
phases = phases & bitmask
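# e.g. for a 64-bit word w, phase k (k=0..4) is (w >> (12*k)) & 0xFFF, with the lowest 12 bits holding the earliest sample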
#now we have a nWords x nPhasesPerWord array
#flatten so that the phases are in order
phases = phases.flatten(order='C')
phases = np.array(phases,dtype=np.uint64)
signBits = np.array(phases / (2**(nBitsPerPhase-1)),dtype=np.bool)
#print signBits[0:10]
#check the sign bits to see what values should be negative
#for the ones that should be negative undo the 2's complement, and flip the sign
phases[signBits] = ((~phases[signBits]) & bitmask)+1
phases = np.array(phases,dtype=np.double)
phases[signBits] = -phases[signBits]
#now shift down to the binary point
phases = phases / 2**binPtPhase
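# e.g. the 12-bit value 0xFFF decodes to -1 LSB, i.e. -1/2**9 ~ -0.002 rad after shifting down by the binary point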
return phases
#convert from radians to degrees
#phases = 180./np.pi * phases
#plt.plot(phases[-2**15:],'.-')
# photonPeriod = 4096 #timesteps (us)
# #fold it and make sure we have the same phases every time
# nPhotons = len(phases)//photonPeriod
# phases = phases[0:(nPhotons*photonPeriod)].reshape((-1,photonPeriod))
# disagreement = (phases[1:] - phases[0])
# print 'discrepancies:',np.sum(disagreement)
# np.save
#plt.show()
def performIQSweep(self,startLOFreq,stopLOFreq,stepLOFreq):
"""
Performs a sweep over the LO frequency. Records
one IQ point per channel per frequency; stores in
self.iqSweepData
makes iqData - 4xn array - each row has I and Q values for a single stream. For each row:
256 I points + 256 Q points for LO step 0, then 256 I points + 256 Q points for LO step 1, etc..
Shape = [4, (nChannelsPerStream+nChannelsPerStream) * nLOsteps]
Get one per stream (4 streams for all thousand resonators)
Formatted using formatIQSweepData then stored in self.iqSweepData
The logic is as follows:
set LO
Arm the snapshot block
trigger write enable - This grabs the first set of 256 I, 256 Q points
disable the writeEnable
set LO
trigger write enable - This grabs a second set of 256 I, 256 Q points
read snapshot block - read 1024 points from mem
disable writeEnable
INPUTS:
startLOFreq - starting sweep frequency [MHz]
stopLOFreq - final sweep frequency [MHz]
stepLOFreq - frequency sweep step size [MHz]
OUTPUTS:
iqSweepData - Dictionary with following keywords
I - 2D array with shape = [nFreqs, nLOsteps]
Q - 2D array with shape = [nFreqs, nLOsteps]
freqOffsets - list of offsets from LO in Hz. shape = [nLOsteps]
"""
LOFreqs = np.arange(startLOFreq, stopLOFreq, stepLOFreq)
nStreams = self.params['nChannels']/self.params['nChannelsPerStream']
iqData = np.empty([nStreams,0])
# The magic number 4 below is the number of IQ points per read
# We get two I points and two Q points every read
iqPt = np.empty([nStreams,self.params['nChannelsPerStream']*4])
self.fpga.write_int(self.params['iqSnpStart_reg'],0)
for i in range(len(LOFreqs)):
if self.verbose:
print 'Sweeping LO ' + str(LOFreqs[i]) + ' MHz'
self.loadLOFreq(LOFreqs[i])
#time.sleep(0.01) # I dunno how long it takes to set the LO
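# the IQ snapshot buffers two LO steps of data: arm on even steps, read both steps back on odd steps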
if(i%2==0):
for stream in range(nStreams):
self.fpga.snapshots[self.params['iqSnp_regs'][stream]].arm(man_valid = False, man_trig = False)
self.fpga.write_int(self.params['iqSnpStart_reg'],1)
time.sleep(0.001) # takes nChannelsPerStream/fpgaClockRate seconds to load all the values
if(i%2==1):
for stream in range(nStreams):
iqPt[stream]=self.fpga.snapshots[self.params['iqSnp_regs'][stream]].read(timeout = 10, arm = False)['data']['iq']
iqData = np.append(iqData, iqPt,1)
self.fpga.write_int(self.params['iqSnpStart_reg'],0)
# if odd number of LO steps then we still need to read out half of the last buffer
if len(LOFreqs) % 2 == 1:
self.fpga.write_int(self.params['iqSnpStart_reg'],1)
time.sleep(0.001)
for stream in range(nStreams):
iqPt[stream]=self.fpga.snapshots[self.params['iqSnp_regs'][stream]].read(timeout = 10, arm = False)['data']['iq']
iqData = np.append(iqData, iqPt[:,:self.params['nChannelsPerStream']*2],1)
self.fpga.write_int(self.params['iqSnpStart_reg'],0)
self.loadLOFreq() # reloads initial lo freq
self.iqSweepData = self.formatIQSweepData(iqData)
self.iqSweepData['freqOffsets'] = np.copy((LOFreqs*10**6. - self.LOFreq)) # [Hz]
#self.iqSweepData = iqData
return self.iqSweepData
def formatIQSweepData(self, iqDataStreams):
"""
Reshapes the iqdata into a usable format
Need to put the data in the same order as the freqList that was loaded in
If we haven't loaded in a freqList then the order is channels 0..255 in stream 0, then stream 1, etc..
INPUTS:
iqDataStreams - 2D array with following shape:
[nStreams, (nChannelsPerStream+nChannelsPerStream) * nSteps]
OUTPUTS:
iqSweepData - Dictionary with following keywords
I - 2D array with shape = [nFreqs, nSteps]
Q - 2D array with shape = [nFreqs, nSteps]
"""
# Only return IQ data for channels/streams with resonators associated with them
try:
freqChans = range(len(self.freqList))
#channels, streams = self.freqChannelToStreamChannel(freqChans) # Need to be careful about how the resonators are distributed into firmware streams
channels, streams = self.getStreamChannelFromFreqChannel(freqChans)
except AttributeError: # If we haven't loaded in frequencies yet then grab all channels
freqChans = range(self.params['nChannels'])
streams = np.repeat(range(self.params['nChannels']/self.params['nChannelsPerStream']), self.params['nChannelsPerStream'])
channels = np.tile(range(self.params['nChannelsPerStream']), self.params['nChannels']/self.params['nChannelsPerStream'])
I_list = []
Q_list = []
for i in range(len(freqChans)):
ch, stream = np.atleast_1d(channels)[i], np.atleast_1d(streams)[i]
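# hard-coded debugging output for two specific frequency channels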
if i==380 or i==371:
print 'i:',i,' stream/ch:',stream,'/',ch
print 'freq[ch]:',self.freqList[i]
print 'freq[ch,stream]:',self.freqChannels[ch,stream]
I = iqDataStreams[stream, ch :: self.params['nChannelsPerStream']*2]
Q = iqDataStreams[stream, ch+self.params['nChannelsPerStream'] :: self.params['nChannelsPerStream']*2]
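# each LO step contributes 2*nChannelsPerStream values per stream: the first nChannelsPerStream are I,
# the next nChannelsPerStream are Q, hence the stride of nChannelsPerStream*2 above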
#Ivals = np.roll(I.flatten(),-2)
#Qvals = np.roll(I.flatten(),-2)
I_list.append(I.flatten())
Q_list.append(Q.flatten())
return {'I':I_list, 'Q':Q_list} # NOTE: returning here leaves the 2-cycle channel-shift code below unreachable
#I_list2=I_list[2:] + I_list[:2]
#Q_list2=Q_list[2:] + Q_list[:2]
I_list2 = I_list[-2:] + I_list[:-2]
Q_list2 = Q_list[-2:] + Q_list[:-2]
#I_list2[:-2]=I_list[2:] # There is a 2 cycle delay in the snapshot block
#I_list2[-2:]=I_list[:2] # need to shift the channels by two
#Q_list2=Q_list
#Q_list2[:-2]=Q_list[2:]
#Q_list2[-2:]=Q_list[:2]
return {'I':I_list2, 'Q':Q_list2}
def loadBeammapCoords(self,beammap=None,initialBeammapDict=None):
"""
Load the beammap coordinates x,y corresponding to each frqChannel for each stream
INPUTS:
beammap - to be determined, if None, use initial beammap assignments
initialBeammapDict containts
'feedline' - the feedline for this roach
'sideband' - either 'pos' or 'neg' indicating whether these frequencies are above or below the LO
'boardRange' - either 'a' or 'b' indicating whether this board is assigned to low (a) or high (b) frequencies of a feedline
"""
if beammap is None:
allStreamChannels,allStreams = self.getStreamChannelFromFreqChannel()
for stream in np.unique(allStreams):
streamChannels = allStreamChannels[np.where(allStreams==stream)]
streamCoordBits = []
for streamChannel in streamChannels:
freqChannel = self.getFreqChannelFromStreamChannel(streamChannel,stream)
coordDict = xyPack(freqChannel=freqChannel,**initialBeammapDict)
x = coordDict['x']
y = coordDict['y']
streamCoordBits.append((x << self.params['nBitsYCoord']) + y)
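# pack the coordinates as (x << nBitsYCoord) + y, i.e. x in the high bits and y in the low nBitsYCoord bits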
streamCoordBits = np.array(streamCoordBits)
self.writeBram(memName = self.params['pixelnames_bram'][stream],valuesToWrite = streamCoordBits)
else:
raise ValueError('loading beammaps not implemented yet')
def takeAvgIQData(self,numPts=100):
"""
Take IQ data with the LO fixed (at self.LOFreq)
INPUTS:
numPts - Number of IQ points to take
OUTPUTS:
iqToneData - Dictionary with following keywords
I - 2D array with shape = [nFreqs, numPts]
Q - 2D array with shape = [nFreqs, numPts]
"""
counter = np.arange(numPts)
nStreams = self.params['nChannels']/self.params['nChannelsPerStream']
iqData = np.empty([nStreams,0])
self.fpga.write_int(self.params['iqSnpStart_reg'],0)
iqPt = np.empty([nStreams,self.params['nChannelsPerStream']*4])
for i in counter:
if self.verbose:
print 'IQ point #' + str(i)
if(i%2==0):
for stream in range(nStreams):
self.fpga.snapshots[self.params['iqSnp_regs'][stream]].arm(man_valid = False, man_trig = False)
self.fpga.write_int(self.params['iqSnpStart_reg'],1)
time.sleep(0.001) # takes nChannelsPerStream/fpgaClockRate seconds to load all the values
if(i%2==1):
for stream in range(nStreams):
iqPt[stream]=self.fpga.snapshots[self.params['iqSnp_regs'][stream]].read(timeout = 10, arm = False)['data']['iq']
iqData = np.append(iqData, iqPt,1)
self.fpga.write_int(self.params['iqSnpStart_reg'],0)
# if odd number of steps then we still need to read out half of the last buffer
if len(counter) % 2 == 1:
self.fpga.write_int(self.params['iqSnpStart_reg'],1)
time.sleep(0.001)
for stream in range(nStreams):
iqPt[stream]=self.fpga.snapshots[self.params['iqSnp_regs'][stream]].read(timeout = 10, arm = False)['data']['iq']
iqData = np.append(iqData, iqPt[:,:self.params['nChannelsPerStream']*2],1)
self.fpga.write_int(self.params['iqSnpStart_reg'],0)
self.iqToneData = self.formatIQSweepData(iqData)
#self.iqToneDataRaw = iqData
return self.iqToneData
def loadIQcenters(self, centers):
"""
Load IQ centers in firmware registers
INPUTS:
centers - 2d array of centers.
First column is I centers, second is Q centers.
Rows correspond to resonators in the same order as the freqlist
shape: [nFreqs, 2]
"""
#channels, streams = self.freqChannelToStreamChannel()
channels, streams = self.getStreamChannelFromFreqChannel()
for i in range(len(centers)):
ch = channels[i]
stream=streams[i]
#ch, stream = np.where(self.freqChannels == self.freqList[i])
#print 'IQ center',ch,centers[i][0],centers[i][1]
I_c = int(centers[i][0]/2**3)
Q_c = int(centers[i][1]/2**3)
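# the division by 2**3 is presumably to match the fixed-point scaling the firmware expects for the 16-bit I/Q center values (assumption)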
center = (I_c<<16) + (Q_c<<0) # 32 bit number - 16bit I + 16bit Q
#print 'loading I,Q',I_c,Q_c
self.fpga.write_int(self.params['iqCenter_regs'][stream], center)
self.fpga.write_int(self.params['iqLoadCenter_regs'][stream], (ch<<1)+(1<<0))
self.fpga.write_int(self.params['iqLoadCenter_regs'][stream], 0)
def sendUARTCommand(self, inByte):
"""
Sends a single byte to V7 over UART
Doesn't wait for a v7_ready signal
Inputs:
inByte - byte to send over UART
"""
self.fpga.write_int(self.params['inByteUART_reg'],inByte)
time.sleep(0.01)
self.fpga.write_int(self.params['txEnUART_reg'],1)
time.sleep(0.01)
self.fpga.write_int(self.params['txEnUART_reg'],0)
if __name__=='__main__':
if len(sys.argv) > 1:
ip = sys.argv[1]
else:
ip='10.0.0.113'
if len(sys.argv) > 2:
params = sys.argv[2]
else:
params='DarknessFpga_V2.param'
print ip
print params
#warnings.filterwarnings('error')
#freqList = [7.32421875e9, 8.e9, 9.e9, 10.e9,11.e9,12.e9,13.e9,14.e9,15e9,16e9,17.e9,18.e9,19.e9,20.e9,21.e9,22.e9,23.e9]
nFreqs=17
loFreq = 5.e9
spacing = 2.e6
freqList = np.arange(loFreq-nFreqs/2.*spacing,loFreq+nFreqs/2.*spacing,spacing)
freqList+=np.random.uniform(-spacing,spacing,nFreqs)
freqList = np.sort(freqList)
#attenList = np.random.randint(40,45,nFreqs)
#freqList=np.asarray([5.2498416321e9, 5.125256256e9, 4.852323456e9, 4.69687416351e9])#,4.547846e9])
#attenList=np.asarray([41,42,43,45])#,6])
freqList=np.asarray([4.620303e9])
attenList=np.asarray([0])
#attenList = attenList[np.where(freqList > loFreq)]
#freqList = freqList[np.where(freqList > loFreq)]
roach_0 = Roach2Controls(ip, params, True, False)
roach_0.connect()
roach_0.setLOFreq(loFreq)
roach_0.generateResonatorChannels(freqList)
roach_0.generateFftChanSelection()
#roach_0.generateDacComb(resAttenList=attenList,globalDacAtten=9)
roach_0.generateDacComb(freqList=freqList)
print 'Generating DDS Tones...'
roach_0.generateDdsTones()
roach_0.debug=False
#for i in range(10000):
# roach_0.generateDacComb(resAttenList=attenList,globalDacAtten=9)
print 'Loading DDS LUT...'
#roach_0.loadDdsLUT()
print 'Checking DDS Shift...'
#DdsShift = roach_0.checkDdsShift()
#print DdsShift
#roach_0.loadDdsShift(DdsShift)
print 'Loading ChanSel...'
#roach_0.loadChanSelection()
print 'Init V7'
roach_0.initializeV7UART(waitForV7Ready=False)
#roach_0.initV7MB()
roach_0.loadLOFreq()
roach_0.loadDacLUT()
#roach_0.generateDacComb(freqList, attenList, 17)
#print roach_0.phaseList
#print 10**(-0.25/20.)
#roach_0.generateDacComb(freqList, attenList, 17, phaseList = roach_0.phaseList, dacScaleFactor=roach_0.dacScaleFactor*10**(-3./20.))
#roach_0.generateDacComb(freqList, attenList, 20, phaseList = roach_0.phaseList, dacScaleFactor=roach_0.dacScaleFactor)
#roach_0.loadDacLUT()
#roach_0.generateDdsTones()
#if roach_0.debug: plt.show()
| gpl-2.0 |
ressu/SickGear | SickBeard.py | 1 | 21195 | #!/usr/bin/env python2
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickGear.
#
# SickGear is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickGear is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickGear. If not, see <http://www.gnu.org/licenses/>.
# Check needed software dependencies to nudge users to fix their setup
from __future__ import with_statement
import time
import signal
import sys
import shutil
import subprocess
if sys.version_info < (2, 6):
print "Sorry, requires Python 2.6 or 2.7."
sys.exit(1)
try:
import Cheetah
if Cheetah.Version[0] != '2':
raise ValueError
except ValueError:
print "Sorry, requires Python module Cheetah 2.1.0 or newer."
sys.exit(1)
except:
print "The Python module Cheetah is required"
sys.exit(1)
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), 'lib')))
# We only need this for compiling an EXE and I will just always do that on 2.6+
if sys.hexversion >= 0x020600F0:
from multiprocessing import freeze_support # @UnresolvedImport
import locale
import datetime
import threading
import getopt
import sickbeard
from sickbeard import db, logger, network_timezones, failed_history, name_cache, versionChecker
from sickbeard.tv import TVShow
from sickbeard.webserveInit import WebServer
from sickbeard.databases.mainDB import MIN_DB_VERSION, MAX_DB_VERSION
from sickbeard.event_queue import Events
from lib.configobj import ConfigObj
throwaway = datetime.datetime.strptime('20110101', '%Y%m%d')
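# throwaway call: the first use of strptime is not thread-safe (it lazily imports _strptime), so presumably this forces it to happen once in the main thread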
signal.signal(signal.SIGINT, sickbeard.sig_handler)
signal.signal(signal.SIGTERM, sickbeard.sig_handler)
class SickGear(object):
def __init__(self):
# system event callback for shutdown/restart
sickbeard.events = Events(self.shutdown)
# daemon constants
self.runAsDaemon = False
self.CREATEPID = False
self.PIDFILE = ''
# webserver constants
self.webserver = None
self.forceUpdate = False
self.forcedPort = None
self.noLaunch = False
def help_message(self):
"""
print help message for commandline options
"""
help_msg = "\n"
help_msg += "Usage: " + sickbeard.MY_FULLNAME + " <option> <another option>\n"
help_msg += "\n"
help_msg += "Options:\n"
help_msg += "\n"
help_msg += " -h --help Prints this message\n"
help_msg += " -f --forceupdate Force update all shows in the DB (from tvdb) on startup\n"
help_msg += " -q --quiet Disables logging to console\n"
help_msg += " --nolaunch Suppress launching web browser on startup\n"
if sys.platform == 'win32':
help_msg += " -d --daemon Running as real daemon is not supported on Windows\n"
help_msg += " On Windows, --daemon is substituted with: --quiet --nolaunch\n"
else:
help_msg += " -d --daemon Run as double forked daemon (includes options --quiet --nolaunch)\n"
help_msg += " --pidfile=<path> Combined with --daemon creates a pidfile (full path including filename)\n"
help_msg += " -p <port> --port=<port> Override default/configured port to listen on\n"
help_msg += " --datadir=<path> Override folder (full path) as location for\n"
help_msg += " storing database, configfile, cache, logfiles \n"
help_msg += " Default: " + sickbeard.PROG_DIR + "\n"
help_msg += " --config=<path> Override config filename (full path including filename)\n"
help_msg += " to load configuration from \n"
help_msg += " Default: config.ini in " + sickbeard.PROG_DIR + " or --datadir location\n"
help_msg += " --noresize Prevent resizing of the banner/posters even if PIL is installed\n"
return help_msg
def start(self):
# do some preliminary stuff
sickbeard.MY_FULLNAME = os.path.normpath(os.path.abspath(__file__))
sickbeard.MY_NAME = os.path.basename(sickbeard.MY_FULLNAME)
sickbeard.PROG_DIR = os.path.dirname(sickbeard.MY_FULLNAME)
sickbeard.DATA_DIR = sickbeard.PROG_DIR
sickbeard.MY_ARGS = sys.argv[1:]
sickbeard.SYS_ENCODING = None
try:
locale.setlocale(locale.LC_ALL, "")
except (locale.Error, IOError):
pass
try:
sickbeard.SYS_ENCODING = locale.getpreferredencoding()
except (locale.Error, IOError):
pass
# For OSes that are poorly configured I'll just randomly force UTF-8
if not sickbeard.SYS_ENCODING or sickbeard.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
sickbeard.SYS_ENCODING = 'UTF-8'
if not hasattr(sys, "setdefaultencoding"):
reload(sys)
try:
# pylint: disable=E1101
# On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError
sys.setdefaultencoding(sickbeard.SYS_ENCODING)
except:
print 'Sorry, you MUST add the SickGear folder to the PYTHONPATH environment variable'
print 'or find another way to force Python to use ' + sickbeard.SYS_ENCODING + ' for string encoding.'
sys.exit(1)
# Need console logging for SickBeard.py and SickBeard-console.exe
self.consoleLogging = (not hasattr(sys, "frozen")) or (sickbeard.MY_NAME.lower().find('-console') > 0)
# Rename the main thread
threading.currentThread().name = "MAIN"
try:
opts, args = getopt.getopt(sys.argv[1:], "hfqdp::",
['help', 'forceupdate', 'quiet', 'nolaunch', 'daemon', 'pidfile=', 'port=',
'datadir=', 'config=', 'noresize']) # @UnusedVariable
except getopt.GetoptError:
sys.exit(self.help_message())
for o, a in opts:
# Prints help message
if o in ('-h', '--help'):
sys.exit(self.help_message())
# For now we'll just silence the logging
if o in ('-q', '--quiet'):
self.consoleLogging = False
# Should we update (from indexer) all shows in the DB right away?
if o in ('-f', '--forceupdate'):
self.forceUpdate = True
# Suppress launching web browser
# Needed for OSes without default browser assigned
# Prevent duplicate browser window when restarting in the app
if o in ('--nolaunch',):
self.noLaunch = True
# Override default/configured port
if o in ('-p', '--port'):
try:
self.forcedPort = int(a)
except ValueError:
sys.exit("Port: " + str(a) + " is not a number. Exiting.")
# Run as a double forked daemon
if o in ('-d', '--daemon'):
self.runAsDaemon = True
# When running as daemon disable consoleLogging and don't start browser
self.consoleLogging = False
self.noLaunch = True
if sys.platform == 'win32':
self.runAsDaemon = False
# Write a pidfile if requested
if o in ('--pidfile',):
self.CREATEPID = True
self.PIDFILE = str(a)
# If the pidfile already exists, sickbeard may still be running, so exit
if os.path.exists(self.PIDFILE):
sys.exit("PID file: " + self.PIDFILE + " already exists. Exiting.")
# Specify folder to load the config file from
if o in ('--config',):
sickbeard.CONFIG_FILE = os.path.abspath(a)
# Specify folder to use as the data dir
if o in ('--datadir',):
sickbeard.DATA_DIR = os.path.abspath(a)
# Prevent resizing of the banner/posters even if PIL is installed
if o in ('--noresize',):
sickbeard.NO_RESIZE = True
# The pidfile is only useful in daemon mode, make sure we can write the file properly
if self.CREATEPID:
if self.runAsDaemon:
pid_dir = os.path.dirname(self.PIDFILE)
if not os.access(pid_dir, os.F_OK):
sys.exit("PID dir: " + pid_dir + " doesn't exist. Exiting.")
if not os.access(pid_dir, os.W_OK):
sys.exit("PID dir: " + pid_dir + " must be writable (write permissions). Exiting.")
else:
if self.consoleLogging:
sys.stdout.write("Not running in daemon mode. PID file creation disabled.\n")
self.CREATEPID = False
# If they don't specify a config file then put it in the data dir
if not sickbeard.CONFIG_FILE:
sickbeard.CONFIG_FILE = os.path.join(sickbeard.DATA_DIR, "config.ini")
# Make sure that we can create the data dir
if not os.access(sickbeard.DATA_DIR, os.F_OK):
try:
os.makedirs(sickbeard.DATA_DIR, 0744)
except os.error, e:
raise SystemExit("Unable to create datadir '" + sickbeard.DATA_DIR + "'")
# Make sure we can write to the data dir
if not os.access(sickbeard.DATA_DIR, os.W_OK):
raise SystemExit("Datadir must be writeable '" + sickbeard.DATA_DIR + "'")
# Make sure we can write to the config file
if not os.access(sickbeard.CONFIG_FILE, os.W_OK):
if os.path.isfile(sickbeard.CONFIG_FILE):
raise SystemExit("Config file '" + sickbeard.CONFIG_FILE + "' must be writeable.")
elif not os.access(os.path.dirname(sickbeard.CONFIG_FILE), os.W_OK):
raise SystemExit(
"Config file root dir '" + os.path.dirname(sickbeard.CONFIG_FILE) + "' must be writeable.")
# Check if we need to perform a restore first
restoreDir = os.path.join(sickbeard.DATA_DIR, 'restore')
if os.path.exists(restoreDir):
if self.restore(restoreDir, sickbeard.DATA_DIR):
logger.log(u"Restore successful...")
else:
logger.log(u"Restore FAILED!", logger.ERROR)
os.chdir(sickbeard.DATA_DIR)
# Load the config and publish it to the sickbeard package
if not os.path.isfile(sickbeard.CONFIG_FILE):
logger.log(u"Unable to find '" + sickbeard.CONFIG_FILE + "' , all settings will be default!", logger.ERROR)
sickbeard.CFG = ConfigObj(sickbeard.CONFIG_FILE)
CUR_DB_VERSION = db.DBConnection().checkDBVersion()
if CUR_DB_VERSION > 0:
if CUR_DB_VERSION < MIN_DB_VERSION:
raise SystemExit("Your database version (" + str(
CUR_DB_VERSION) + ") is too old to migrate from with this version of SickGear (" + str(
MIN_DB_VERSION) + ").\n" + \
"Upgrade using a previous version of SB first, or start with no database file to begin fresh.")
if CUR_DB_VERSION > MAX_DB_VERSION:
raise SystemExit("Your database version (" + str(
CUR_DB_VERSION) + ") has been incremented past what this version of SickGear supports (" + str(
MAX_DB_VERSION) + ").\n" + \
"If you have used other forks of SB, your database may be unusable due to their modifications.")
# Initialize the config and our threads
sickbeard.initialize(consoleLogging=self.consoleLogging)
if self.runAsDaemon:
self.daemonize()
# Get PID
sickbeard.PID = os.getpid()
# Build from the DB to start with
self.loadShowsFromDB()
if self.forcedPort:
logger.log(u"Forcing web server to port " + str(self.forcedPort))
self.startPort = self.forcedPort
else:
self.startPort = sickbeard.WEB_PORT
if sickbeard.WEB_LOG:
self.log_dir = sickbeard.LOG_DIR
else:
self.log_dir = None
# sickbeard.WEB_HOST is available as a configuration value in various
# places but is not configurable. It is supported here for historic reasons.
if sickbeard.WEB_HOST and sickbeard.WEB_HOST != '0.0.0.0':
self.webhost = sickbeard.WEB_HOST
else:
if sickbeard.WEB_IPV6:
self.webhost = '::'
else:
self.webhost = '0.0.0.0'
# web server options
self.web_options = {
'port': int(self.startPort),
'host': self.webhost,
'data_root': os.path.join(sickbeard.PROG_DIR, 'gui', sickbeard.GUI_NAME),
'web_root': sickbeard.WEB_ROOT,
'log_dir': self.log_dir,
'username': sickbeard.WEB_USERNAME,
'password': sickbeard.WEB_PASSWORD,
'enable_https': sickbeard.ENABLE_HTTPS,
'handle_reverse_proxy': sickbeard.HANDLE_REVERSE_PROXY,
'https_cert': os.path.join(sickbeard.PROG_DIR, sickbeard.HTTPS_CERT),
'https_key': os.path.join(sickbeard.PROG_DIR, sickbeard.HTTPS_KEY),
}
# start web server
try:
self.webserver = WebServer(self.web_options)
self.webserver.start()
except IOError:
logger.log(u"Unable to start web server, is something else running on port %d?" % self.startPort,
logger.ERROR)
if sickbeard.LAUNCH_BROWSER and not self.runAsDaemon:
logger.log(u"Launching browser and exiting", logger.ERROR)
sickbeard.launchBrowser(self.startPort)
os._exit(1)
if self.consoleLogging:
print "Starting up SickGear " + sickbeard.BRANCH + " from " + sickbeard.CONFIG_FILE
# Fire up all our threads
sickbeard.start()
# Build internal name cache
name_cache.buildNameCache()
# refresh network timezones
network_timezones.update_network_dict()
# sure, why not?
if sickbeard.USE_FAILED_DOWNLOADS:
failed_history.trimHistory()
# Start an update if we're supposed to
if self.forceUpdate or sickbeard.UPDATE_SHOWS_ON_START:
sickbeard.showUpdateScheduler.action.run(force=True) # @UndefinedVariable
# Launch browser
if sickbeard.LAUNCH_BROWSER and not (self.noLaunch or self.runAsDaemon):
sickbeard.launchBrowser(self.startPort)
# main loop
while (True):
time.sleep(1)
def daemonize(self):
"""
Fork off as a daemon
"""
# pylint: disable=E1101
# Make a non-session-leader child process
try:
pid = os.fork() # @UndefinedVariable - only available in UNIX
if pid != 0:
os._exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
os.setsid() # @UndefinedVariable - only available in UNIX
# Make sure I can read my own files and shut out others
prev = os.umask(0)
os.umask(prev and int('077', 8))
# Make the child a session-leader by detaching from the terminal
try:
pid = os.fork() # @UndefinedVariable - only available in UNIX
if pid != 0:
os._exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Write pid
if self.CREATEPID:
pid = str(os.getpid())
logger.log(u"Writing PID: " + pid + " to " + str(self.PIDFILE))
try:
file(self.PIDFILE, 'w').write("%s\n" % pid)
except IOError, e:
logger.log_error_and_exit(
u"Unable to write PID file: " + self.PIDFILE + " Error: " + str(e.strerror) + " [" + str(
e.errno) + "]")
# Redirect all output
sys.stdout.flush()
sys.stderr.flush()
devnull = getattr(os, 'devnull', '/dev/null')
stdin = file(devnull, 'r')
stdout = file(devnull, 'a+')
stderr = file(devnull, 'a+')
os.dup2(stdin.fileno(), sys.stdin.fileno())
os.dup2(stdout.fileno(), sys.stdout.fileno())
os.dup2(stderr.fileno(), sys.stderr.fileno())
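    # Editor's illustrative sketch (not part of SickGear): the two fork() calls
    # above are the classic UNIX double-fork daemonization recipe.  A minimal
    # standalone version of the same idea looks roughly like this (the helper
    # name is an assumption, UNIX only):
    #
    #   import os, sys
    #
    #   def become_daemon():
    #       if os.fork() != 0:           # first fork: original parent exits
    #           os._exit(0)
    #       os.setsid()                  # become session leader, drop the tty
    #       if os.fork() != 0:           # second fork: never re-acquire a tty
    #           os._exit(0)
    #       sys.stdout.flush()
    #       sys.stderr.flush()
    #       devnull = open(os.devnull, 'a+')
    #       os.dup2(devnull.fileno(), sys.stdout.fileno())
    #       os.dup2(devnull.fileno(), sys.stderr.fileno())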
def remove_pid_file(self, PIDFILE):
try:
if os.path.exists(PIDFILE):
os.remove(PIDFILE)
except (IOError, OSError):
return False
return True
def loadShowsFromDB(self):
"""
Populates the showList with shows from the database
"""
logger.log(u"Loading initial show list")
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_shows")
sickbeard.showList = []
for sqlShow in sqlResults:
try:
curShow = TVShow(int(sqlShow["indexer"]), int(sqlShow["indexer_id"]))
curShow.nextEpisode()
sickbeard.showList.append(curShow)
except Exception, e:
logger.log(
u"There was an error creating the show in " + sqlShow["location"] + ": " + str(e).decode('utf-8',
'replace'),
logger.ERROR)
def restore(self, srcDir, dstDir):
try:
for file in os.listdir(srcDir):
srcFile = os.path.join(srcDir, file)
dstFile = os.path.join(dstDir, file)
bakFile = os.path.join(dstDir, file + '.bak')
shutil.move(dstFile, bakFile)
shutil.move(srcFile, dstFile)
os.rmdir(srcDir)
return True
except:
return False
def shutdown(self, type):
if sickbeard.started:
# stop all tasks
sickbeard.halt()
# save all shows to DB
sickbeard.saveAll()
# shutdown web server
if self.webserver:
logger.log("Shutting down Tornado")
self.webserver.shutDown()
try:
self.webserver.join(10)
except:
pass
# if run as daemon delete the pidfile
if self.runAsDaemon and self.CREATEPID:
self.remove_pid_file(self.PIDFILE)
if type == sickbeard.events.SystemEvent.RESTART:
install_type = sickbeard.versionCheckScheduler.action.install_type
popen_list = []
if install_type in ('git', 'source'):
popen_list = [sys.executable, sickbeard.MY_FULLNAME]
elif install_type == 'win':
if hasattr(sys, 'frozen'):
# c:\dir\to\updater.exe 12345 c:\dir\to\sickbeard.exe
popen_list = [os.path.join(sickbeard.PROG_DIR, 'updater.exe'), str(sickbeard.PID),
sys.executable]
else:
logger.log(u"Unknown SB launch method, please file a bug report about this", logger.ERROR)
popen_list = [sys.executable, os.path.join(sickbeard.PROG_DIR, 'updater.py'),
str(sickbeard.PID),
sys.executable,
sickbeard.MY_FULLNAME]
if popen_list:
popen_list += sickbeard.MY_ARGS
if '--nolaunch' not in popen_list:
popen_list += ['--nolaunch']
logger.log(u"Restarting SickGear with " + str(popen_list))
logger.close()
subprocess.Popen(popen_list, cwd=os.getcwd())
# system exit
os._exit(0)
if __name__ == "__main__":
if sys.hexversion >= 0x020600F0:
freeze_support()
# start SickGear
SickGear().start()
| gpl-3.0 |
mgedmin/mgp2pdf | samples/python/l5filter.py | 1 | 1505 | #!/usr/bin/python
import sys
pagesize = 11
preamble = """
%nodefault,area 90 90, vgap 260, size 8, font "standard", fore "#134d73", back "white", right, newimage -zoom 75 "vu-logo.png"
%size 3
%size 6, vgap 40, left
""".strip()
pagebreak = "%page\n" + preamble
def format(lines):
iterlines = iter(lines)
def nextline():
return next(iterlines, None)
print(preamble)
nlines = 0
line = nextline()
while line is not None:
if line.startswith("#"):
print(line.strip())
line = nextline()
continue
output = []
if '--' in line:
a, b = line.split("--", 1)
output = []
if a:
output += ['%font "thick"\n' + a.strip()]
if b.strip():
output.append('%font "standard"\n\t' + b.strip())
line = nextline()
else:
while True:
line = nextline()
if line is None:
break
if not line.startswith(' '):
break
line = line[4:]
output.append('%font "standard"\n\t' + line.rstrip())
else:
output = ['%font "thick"\n' + line.strip()]
line = nextline()
if nlines + len(output) > pagesize:
nlines = 0
print(pagebreak)
print("\n".join(output))
nlines += len(output)
format(sys.stdin)
| gpl-2.0 |
Jeromefromcn/shadowsocks | tests/nose_plugin.py | 1072 | 1164 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nose
from nose.plugins.base import Plugin
class ExtensionPlugin(Plugin):
name = "ExtensionPlugin"
def options(self, parser, env):
Plugin.options(self, parser, env)
def configure(self, options, config):
Plugin.configure(self, options, config)
self.enabled = True
def wantFile(self, file):
return file.endswith('.py')
def wantDirectory(self, directory):
return True
def wantModule(self, file):
return True
if __name__ == '__main__':
nose.main(addplugins=[ExtensionPlugin()])
| apache-2.0 |
lidiamcfreitas/FenixScheduleMaker | ScheduleMaker/brython/www/src/Lib/test/fork_wait.py | 32 | 2152 | """This test case provides support for checking forking and wait behavior.
To test different wait behavior, override the wait_impl method.
We want fork1() semantics -- only the forking thread survives in the
child after a fork().
On some systems (e.g. Solaris without posix threads) we find that all
active threads survive in the child after a fork(); this is an error.
"""
import os, sys, time, unittest
import test.support as support
_thread = support.import_module('_thread')
LONGSLEEP = 2
SHORTSLEEP = 0.5
NUM_THREADS = 4
class ForkWait(unittest.TestCase):
def setUp(self):
self.alive = {}
self.stop = 0
def f(self, id):
while not self.stop:
self.alive[id] = os.getpid()
try:
time.sleep(SHORTSLEEP)
except IOError:
pass
def wait_impl(self, cpid):
for i in range(10):
# waitpid() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
spid, status = os.waitpid(cpid, os.WNOHANG)
if spid == cpid:
break
time.sleep(2 * SHORTSLEEP)
self.assertEqual(spid, cpid)
self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
@support.reap_threads
def test_wait(self):
for i in range(NUM_THREADS):
_thread.start_new(self.f, (i,))
time.sleep(LONGSLEEP)
a = sorted(self.alive.keys())
self.assertEqual(a, list(range(NUM_THREADS)))
prefork_lives = self.alive.copy()
if sys.platform in ['unixware7']:
cpid = os.fork1()
else:
cpid = os.fork()
if cpid == 0:
# Child
time.sleep(LONGSLEEP)
n = 0
for key in self.alive:
if self.alive[key] != prefork_lives[key]:
n += 1
os._exit(n)
else:
# Parent
try:
self.wait_impl(cpid)
finally:
# Tell threads to die
self.stop = 1
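# Illustrative sketch added for clarity (not part of the original module): a
# hypothetical subclass showing how a concrete test can override wait_impl(),
# here using a plain blocking waitpid() instead of the polling loop above.
class BlockingForkWait(ForkWait):
    def wait_impl(self, cpid):
        # wait synchronously for the child and check that it exited cleanly
        spid, status = os.waitpid(cpid, 0)
        self.assertEqual(spid, cpid)
        self.assertEqual(status, 0)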
| bsd-2-clause |
extertioner/django-blog-zinnia | zinnia/tests/test_admin_fields.py | 10 | 2666 | """Test cases for Zinnia's admin fields"""
from django.test import TestCase
from django.utils.encoding import smart_text
from zinnia.models import Category
from zinnia.admin.fields import MPTTModelChoiceIterator
from zinnia.admin.fields import MPTTModelMultipleChoiceField
class MPTTModelChoiceIteratorTestCase(TestCase):
def test_choice(self):
category_1 = Category.objects.create(
title='Category 1', slug='cat-1')
category_2 = Category.objects.create(
title='Category 2', slug='cat-2',
parent=category_1)
class FakeField(object):
queryset = Category.objects.all()
def prepare_value(self, value):
return value.pk
def label_from_instance(self, obj):
return smart_text(obj)
field = FakeField()
iterator = MPTTModelChoiceIterator(field)
self.assertEqual(iterator.choice(category_1),
(category_1.pk, 'Category 1', (1, 1)))
self.assertEqual(iterator.choice(category_2),
(category_2.pk, 'Category 2', (1, 2)))
class MPTTModelMultipleChoiceFieldTestCase(TestCase):
def setUp(self):
self.category_1 = Category.objects.create(
title='Category 1', slug='cat-1')
self.category_2 = Category.objects.create(
title='Category 2', slug='cat-2',
parent=self.category_1)
def test_label_from_instance(self):
queryset = Category.objects.all()
field = MPTTModelMultipleChoiceField(
queryset=queryset)
self.assertEqual(field.label_from_instance(self.category_1),
'Category 1')
self.assertEqual(field.label_from_instance(self.category_2),
'|-- Category 2')
field = MPTTModelMultipleChoiceField(
level_indicator='-->', queryset=queryset)
self.assertEqual(field.label_from_instance(self.category_2),
'--> Category 2')
def test_get_choices(self):
queryset = Category.objects.all()
field = MPTTModelMultipleChoiceField(
queryset=queryset)
self.assertEqual(list(field.choices),
[(self.category_1.pk, 'Category 1', (1, 1)),
(self.category_2.pk, '|-- Category 2', (1, 2))])
field = MPTTModelMultipleChoiceField(
level_indicator='-->', queryset=queryset)
self.assertEqual(list(field.choices),
[(self.category_1.pk, 'Category 1', (1, 1)),
(self.category_2.pk, '--> Category 2', (1, 2))])
| bsd-3-clause |
dkdewitt/werkzeug | werkzeug/contrib/fixers.py | 259 | 10183 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.fixers
~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module includes various helpers that fix bugs in web servers. They may
be necessary for some versions of a buggy web server but not others. We try
to stay updated with the status of the bugs as good as possible but you have
to make sure whether they fix the problem you encounter.
If you notice bugs in webservers not fixed in this module consider
contributing a patch.
:copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from werkzeug.http import parse_options_header, parse_cache_control_header, \
parse_set_header
from werkzeug.useragents import UserAgent
from werkzeug.datastructures import Headers, ResponseCacheControl
class CGIRootFix(object):
"""Wrap the application in this middleware if you are using FastCGI or CGI
    and you have problems with your app root being set to the CGI script's path
    instead of the path users are going to visit.
.. versionchanged:: 0.9
Added `app_root` parameter and renamed from `LighttpdCGIRootFix`.
:param app: the WSGI application
:param app_root: Defaulting to ``'/'``, you can set this to something else
if your app is mounted somewhere else.
"""
def __init__(self, app, app_root='/'):
self.app = app
self.app_root = app_root
def __call__(self, environ, start_response):
# only set PATH_INFO for older versions of Lighty or if no
# server software is provided. That's because the test was
# added in newer Werkzeug versions and we don't want to break
# people's code if they are using this fixer in a test that
# does not set the SERVER_SOFTWARE key.
if 'SERVER_SOFTWARE' not in environ or \
environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28':
environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \
environ.get('PATH_INFO', '')
environ['SCRIPT_NAME'] = self.app_root.strip('/')
return self.app(environ, start_response)
# backwards compatibility
LighttpdCGIRootFix = CGIRootFix
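# Editor's usage sketch (not part of Werkzeug): wrapping a minimal WSGI app in
# CGIRootFix so it is served under an application root of /myapp.  The hello()
# app and the '/myapp' root are assumptions chosen purely for illustration.
#
#   def hello(environ, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return ['Hello from behind CGI\n']
#
#   application = CGIRootFix(hello, app_root='/myapp')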
class PathInfoFromRequestUriFix(object):
"""On windows environment variables are limited to the system charset
which makes it impossible to store the `PATH_INFO` variable in the
environment without loss of information on some systems.
This is for example a problem for CGI scripts on a Windows Apache.
This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
`REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the
fix can only be applied if the webserver supports either of these
variables.
:param app: the WSGI application
"""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL':
if key not in environ:
continue
request_uri = unquote(environ[key])
script_name = unquote(environ.get('SCRIPT_NAME', ''))
if request_uri.startswith(script_name):
environ['PATH_INFO'] = request_uri[len(script_name):] \
.split('?', 1)[0]
break
return self.app(environ, start_response)
class ProxyFix(object):
"""This middleware can be applied to add HTTP proxy support to an
application that was not designed with HTTP proxies in mind. It
sets `REMOTE_ADDR`, `HTTP_HOST` from `X-Forwarded` headers. While
Werkzeug-based applications already can use
:py:func:`werkzeug.wsgi.get_host` to retrieve the current host even if
behind proxy setups, this middleware can be used for applications which
access the WSGI environment directly.
If you have more than one proxy server in front of your app, set
`num_proxies` accordingly.
Do not use this middleware in non-proxy setups for security reasons.
The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in
the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and
`werkzeug.proxy_fix.orig_http_host`.
:param app: the WSGI application
:param num_proxies: the number of proxy servers in front of the app.
"""
def __init__(self, app, num_proxies=1):
self.app = app
self.num_proxies = num_proxies
def get_remote_addr(self, forwarded_for):
"""Selects the new remote addr from the given list of ips in
X-Forwarded-For. By default it picks the one that the `num_proxies`
proxy server provides. Before 0.9 it would always pick the first.
.. versionadded:: 0.8
"""
if len(forwarded_for) >= self.num_proxies:
return forwarded_for[-1 * self.num_proxies]
def __call__(self, environ, start_response):
getter = environ.get
forwarded_proto = getter('HTTP_X_FORWARDED_PROTO', '')
forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',')
forwarded_host = getter('HTTP_X_FORWARDED_HOST', '')
environ.update({
'werkzeug.proxy_fix.orig_wsgi_url_scheme': getter('wsgi.url_scheme'),
'werkzeug.proxy_fix.orig_remote_addr': getter('REMOTE_ADDR'),
'werkzeug.proxy_fix.orig_http_host': getter('HTTP_HOST')
})
forwarded_for = [x for x in [x.strip() for x in forwarded_for] if x]
remote_addr = self.get_remote_addr(forwarded_for)
if remote_addr is not None:
environ['REMOTE_ADDR'] = remote_addr
if forwarded_host:
environ['HTTP_HOST'] = forwarded_host
if forwarded_proto:
environ['wsgi.url_scheme'] = forwarded_proto
return self.app(environ, start_response)
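# Editor's usage sketch (not part of Werkzeug): applying ProxyFix to an
# existing WSGI app that sits behind two reverse proxies; `application` is an
# assumed pre-existing WSGI callable.
#
#   application = ProxyFix(application, num_proxies=2)
#
# Inside a request, the original client values remain available via
# environ['werkzeug.proxy_fix.orig_remote_addr'] and
# environ['werkzeug.proxy_fix.orig_http_host'].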
class HeaderRewriterFix(object):
"""This middleware can remove response headers and add others. This
is for example useful to remove the `Date` header from responses if you
are using a server that adds that header, no matter if it's present or
not or to add `X-Powered-By` headers::
app = HeaderRewriterFix(app, remove_headers=['Date'],
add_headers=[('X-Powered-By', 'WSGI')])
:param app: the WSGI application
:param remove_headers: a sequence of header keys that should be
removed.
:param add_headers: a sequence of ``(key, value)`` tuples that should
be added.
"""
def __init__(self, app, remove_headers=None, add_headers=None):
self.app = app
self.remove_headers = set(x.lower() for x in (remove_headers or ()))
self.add_headers = list(add_headers or ())
def __call__(self, environ, start_response):
def rewriting_start_response(status, headers, exc_info=None):
new_headers = []
for key, value in headers:
if key.lower() not in self.remove_headers:
new_headers.append((key, value))
new_headers += self.add_headers
return start_response(status, new_headers, exc_info)
return self.app(environ, rewriting_start_response)
class InternetExplorerFix(object):
"""This middleware fixes a couple of bugs with Microsoft Internet
Explorer. Currently the following fixes are applied:
- removing of `Vary` headers for unsupported mimetypes which
causes troubles with caching. Can be disabled by passing
``fix_vary=False`` to the constructor.
see: http://support.microsoft.com/kb/824847/en-us
- removes offending headers to work around caching bugs in
Internet Explorer if `Content-Disposition` is set. Can be
disabled by passing ``fix_attach=False`` to the constructor.
If it does not detect affected Internet Explorer versions it won't touch
the request / response.
"""
# This code was inspired by Django fixers for the same bugs. The
# fix_vary and fix_attach fixers were originally implemented in Django
# by Michael Axiak and is available as part of the Django project:
# http://code.djangoproject.com/ticket/4148
def __init__(self, app, fix_vary=True, fix_attach=True):
self.app = app
self.fix_vary = fix_vary
self.fix_attach = fix_attach
def fix_headers(self, environ, headers, status=None):
if self.fix_vary:
header = headers.get('content-type', '')
mimetype, options = parse_options_header(header)
if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
headers.pop('vary', None)
if self.fix_attach and 'content-disposition' in headers:
pragma = parse_set_header(headers.get('pragma', ''))
pragma.discard('no-cache')
header = pragma.to_header()
if not header:
headers.pop('pragma', '')
else:
headers['Pragma'] = header
header = headers.get('cache-control', '')
if header:
cc = parse_cache_control_header(header,
cls=ResponseCacheControl)
cc.no_cache = None
cc.no_store = False
header = cc.to_header()
if not header:
headers.pop('cache-control', '')
else:
headers['Cache-Control'] = header
def run_fixed(self, environ, start_response):
def fixing_start_response(status, headers, exc_info=None):
headers = Headers(headers)
self.fix_headers(environ, headers, status)
return start_response(status, headers.to_wsgi_list(), exc_info)
return self.app(environ, fixing_start_response)
def __call__(self, environ, start_response):
ua = UserAgent(environ)
if ua.browser != 'msie':
return self.app(environ, start_response)
return self.run_fixed(environ, start_response)
| bsd-3-clause |
Bysmyyr/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/wpt/wpt/tools/wptserve/wptserve/response.py | 158 | 14749 | from collections import OrderedDict
from datetime import datetime, timedelta
import Cookie
import json
import types
import uuid
import socket
from constants import response_codes
from logger import get_logger
missing = object()
class Response(object):
"""Object representing the response to a HTTP request
:param handler: RequestHandler being used for this response
:param request: Request that this is the response for
.. attribute:: request
Request associated with this Response.
.. attribute:: encoding
The encoding to use when converting unicode to strings for output.
.. attribute:: add_required_headers
Boolean indicating whether mandatory headers should be added to the
response.
.. attribute:: send_body_for_head_request
Boolean, default False, indicating whether the body content should be
sent when the request method is HEAD.
.. attribute:: explicit_flush
Boolean indicating whether output should be flushed automatically or only
when requested.
.. attribute:: writer
The ResponseWriter for this response
.. attribute:: status
Status tuple (code, message). Can be set to an integer, in which case the
message part is filled in automatically, or a tuple.
.. attribute:: headers
List of HTTP headers to send with the response. Each item in the list is a
tuple of (name, value).
.. attribute:: content
    The body of the response. This can either be a string or an iterable of response
parts. If it is an iterable, any item may be a string or a function of zero
parameters which, when called, returns a string."""
def __init__(self, handler, request):
self.request = request
self.encoding = "utf8"
self.add_required_headers = True
self.send_body_for_head_request = False
self.explicit_flush = False
self.close_connection = False
self.writer = ResponseWriter(handler, self)
self._status = (200, None)
self.headers = ResponseHeaders()
self.content = []
self.logger = get_logger()
@property
def status(self):
return self._status
@status.setter
def status(self, value):
if hasattr(value, "__len__"):
if len(value) != 2:
raise ValueError
else:
self._status = (int(value[0]), str(value[1]))
else:
self._status = (int(value), None)
def set_cookie(self, name, value, path="/", domain=None, max_age=None,
expires=None, secure=False, httponly=False, comment=None):
"""Set a cookie to be sent with a Set-Cookie header in the
response
:param name: String name of the cookie
:param value: String value of the cookie
        :param max_age: datetime.timedelta or int representing the time (in seconds)
until the cookie expires
:param path: String path to which the cookie applies
:param domain: String domain to which the cookie applies
:param secure: Boolean indicating whether the cookie is marked as secure
:param httponly: Boolean indicating whether the cookie is marked as
HTTP Only
:param comment: String comment
:param expires: datetime.datetime or datetime.timedelta indicating a
time or interval from now when the cookie expires
"""
days = dict((i+1, name) for i, name in enumerate(["jan", "feb", "mar",
"apr", "may", "jun",
"jul", "aug", "sep",
"oct", "nov", "dec"]))
if value is None:
value = ''
max_age = 0
expires = timedelta(days=-1)
if isinstance(expires, timedelta):
expires = datetime.utcnow() + expires
if expires is not None:
expires_str = expires.strftime("%d %%s %Y %H:%M:%S GMT")
expires_str = expires_str % days[expires.month]
expires = expires_str
if max_age is not None:
if hasattr(max_age, "total_seconds"):
max_age = int(max_age.total_seconds())
max_age = "%.0d" % max_age
m = Cookie.Morsel()
def maybe_set(key, value):
if value is not None and value is not False:
m[key] = value
m.set(name, value, value)
maybe_set("path", path)
maybe_set("domain", domain)
maybe_set("comment", comment)
maybe_set("expires", expires)
maybe_set("max-age", max_age)
maybe_set("secure", secure)
maybe_set("httponly", httponly)
self.headers.append("Set-Cookie", m.OutputString())
def unset_cookie(self, name):
"""Remove a cookie from those that are being sent with the response"""
cookies = self.headers.get("Set-Cookie")
parser = Cookie.BaseCookie()
for cookie in cookies:
parser.load(cookie)
if name in parser.keys():
del self.headers["Set-Cookie"]
for m in parser.values():
if m.key != name:
self.headers.append(("Set-Cookie", m.OutputString()))
def delete_cookie(self, name, path="/", domain=None):
"""Delete a cookie on the client by setting it to the empty string
and to expire in the past"""
self.set_cookie(name, None, path=path, domain=domain, max_age=0,
expires=timedelta(days=-1))
def iter_content(self):
"""Iterator returning chunks of response body content.
If any part of the content is a function, this will be called
and the resulting value (if any) returned."""
if type(self.content) in types.StringTypes:
yield self.content
else:
for item in self.content:
if hasattr(item, "__call__"):
value = item()
else:
value = item
if value:
yield value
def write_status_headers(self):
"""Write out the status line and headers for the response"""
self.writer.write_status(*self.status)
for item in self.headers:
self.writer.write_header(*item)
self.writer.end_headers()
def write_content(self):
"""Write out the response content"""
if self.request.method != "HEAD" or self.send_body_for_head_request:
for item in self.iter_content():
self.writer.write_content(item)
def write(self):
"""Write the whole response"""
self.write_status_headers()
self.write_content()
def set_error(self, code, message=""):
"""Set the response status headers and body to indicate an
error"""
err = {"code": code,
"message": message}
data = json.dumps({"error": err})
self.status = code
self.headers = [("Content-Type", "text/json"),
("Content-Length", len(data))]
self.content = data
if code == 500:
self.logger.error(message)
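# Editor's illustrative sketch (not part of wptserve): filling in a Response
# by hand, as a handler might; status may be an int or a (code, message)
# tuple, and content may be a string or an iterable of strings/callables.
# The handler name and values are assumptions for demonstration only.
#
#   def example_handler(request, response):
#       response.status = (200, "OK")
#       response.headers.set("Content-Type", "text/plain")
#       response.content = ["hello, ", lambda: "world"]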
class MultipartContent(object):
def __init__(self, boundary=None, default_content_type=None):
self.items = []
if boundary is None:
boundary = str(uuid.uuid4())
self.boundary = boundary
self.default_content_type = default_content_type
def __call__(self):
boundary = "--" + self.boundary
rv = ["", boundary]
for item in self.items:
rv.append(str(item))
rv.append(boundary)
rv[-1] += "--"
return "\r\n".join(rv)
def append_part(self, data, content_type=None, headers=None):
if content_type is None:
content_type = self.default_content_type
self.items.append(MultipartPart(data, content_type, headers))
def __iter__(self):
#This is hackish; when writing the response we need an iterable
#or a string. For a multipart/byterange response we want an
#iterable that contains a single callable; the MultipartContent
#object itself
yield self
class MultipartPart(object):
def __init__(self, data, content_type=None, headers=None):
self.headers = ResponseHeaders()
if content_type is not None:
self.headers.set("Content-Type", content_type)
if headers is not None:
for name, value in headers:
if name.lower() == "content-type":
func = self.headers.set
else:
func = self.headers.append
func(name, value)
self.data = data
def __str__(self):
rv = []
for item in self.headers:
rv.append("%s: %s" % item)
rv.append("")
rv.append(self.data)
return "\r\n".join(rv)
class ResponseHeaders(object):
"""Dictionary-like object holding the headers for the response"""
def __init__(self):
self.data = OrderedDict()
def set(self, key, value):
"""Set a header to a specific value, overwriting any previous header
with the same name
:param key: Name of the header to set
:param value: Value to set the header to
"""
self.data[key.lower()] = (key, [value])
def append(self, key, value):
"""Add a new header with a given name, not overwriting any existing
headers with the same name
:param key: Name of the header to add
:param value: Value to set for the header
"""
if key.lower() in self.data:
self.data[key.lower()][1].append(value)
else:
self.set(key, value)
def get(self, key, default=missing):
"""Get the set values for a particular header."""
try:
return self[key]
except KeyError:
if default is missing:
return []
return default
def __getitem__(self, key):
"""Get a list of values for a particular header
"""
return self.data[key.lower()][1]
def __delitem__(self, key):
del self.data[key.lower()]
def __contains__(self, key):
return key.lower() in self.data
def __setitem__(self, key, value):
self.set(key, value)
def __iter__(self):
for key, values in self.data.itervalues():
for value in values:
yield key, value
def items(self):
return list(self)
def update(self, items_iter):
for name, value in items_iter:
self.set(name, value)
def __repr__(self):
return repr(self.data)
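# Editor's illustrative sketch (not part of wptserve): the difference between
# set() and append() on ResponseHeaders -- set() replaces any existing values
# for a header, while append() accumulates them (useful for multiple
# Set-Cookie headers).  Values below are purely illustrative:
#
#   headers = ResponseHeaders()
#   headers.set("Content-Type", "text/html")
#   headers.append("Set-Cookie", "a=1")
#   headers.append("Set-Cookie", "b=2")
#   list(headers)   # [("Content-Type", "text/html"),
#                   #  ("Set-Cookie", "a=1"), ("Set-Cookie", "b=2")]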
class ResponseWriter(object):
"""Object providing an API to write out a HTTP response.
:param handler: The RequestHandler being used.
:param response: The Response associated with this writer.
After each part of the response is written, the output is
    flushed unless response.explicit_flush is True, in which case
the user must call .flush() explicitly."""
def __init__(self, handler, response):
self._wfile = handler.wfile
self._response = response
self._handler = handler
self._headers_seen = set()
self._headers_complete = False
self.content_written = False
self.request = response.request
def write_status(self, code, message=None):
"""Write out the status line of a response.
:param code: The integer status code of the response.
:param message: The message of the response. Defaults to the message commonly used
with the status code."""
if message is None:
if code in response_codes:
message = response_codes[code][0]
else:
message = ''
self.write("%s %d %s\r\n" %
(self._response.request.protocol_version, code, message))
def write_header(self, name, value):
"""Write out a single header for the response.
:param name: Name of the header field
:param value: Value of the header field
"""
self._headers_seen.add(name.lower())
self.write("%s: %s\r\n" % (name, value))
if not self._response.explicit_flush:
self.flush()
def write_default_headers(self):
for name, f in [("Server", self._handler.version_string),
("Date", self._handler.date_time_string)]:
if name.lower() not in self._headers_seen:
self.write_header(name, f())
if (type(self._response.content) in (str, unicode) and
"content-length" not in self._headers_seen):
#Would be nice to avoid double-encoding here
self.write_header("Content-Length", len(self.encode(self._response.content)))
def end_headers(self):
"""Finish writing headers and write the separator.
Unless add_required_headers on the response is False,
this will also add HTTP-mandated headers that have not yet been supplied
to the response headers"""
if self._response.add_required_headers:
self.write_default_headers()
self.write("\r\n")
if "content-length" not in self._headers_seen:
self._response.close_connection = True
if not self._response.explicit_flush:
self.flush()
self._headers_complete = True
def write_content(self, data):
"""Write the body of the response."""
self.write(self.encode(data))
if not self._response.explicit_flush:
self.flush()
def write(self, data):
"""Write directly to the response, converting unicode to bytes
according to response.encoding. Does not flush."""
self.content_written = True
try:
self._wfile.write(self.encode(data))
except socket.error:
# This can happen if the socket got closed by the remote end
pass
def encode(self, data):
"""Convert unicode to bytes according to response.encoding."""
if isinstance(data, str):
return data
elif isinstance(data, unicode):
return data.encode(self._response.encoding)
else:
raise ValueError
def flush(self):
"""Flush the output."""
try:
self._wfile.flush()
except socket.error:
# This can happen if the socket got closed by the remote end
pass
| bsd-3-clause |
frank10704/DF_GCS_W | MissionPlanner-master/packages/IronPython.StdLib.2.7.5-beta1/content/Lib/distutils/sysconfig.py | 42 | 21927 | """Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
Written by: Fred L. Drake, Jr.
Email: <[email protected]>
"""
__revision__ = "$Id$"
import os
import re
import string
import sys
from distutils.errors import DistutilsPlatformError
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# Path to the base directory of the project. On Windows the binary may
# live in project/PCBuild9. If we're dealing with an x64 Windows build,
# it'll live in project/PCbuild/amd64.
project_base = os.path.dirname(os.path.abspath(sys.executable))
if os.name == "nt" and "pcbuild" in project_base[-8:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in project_base[-10:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir,
os.path.pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in project_base[-14:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir,
os.path.pardir))
# python_build: (Boolean) if true, we're either building Python or
# building an extension with an un-installed Python, so we use
# different (hard-wired) directories.
# Setup.local is available for Makefile builds including VPATH builds,
# Setup.dist is available on Windows
def _python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(project_base, "Modules", fn)):
return True
return False
python_build = _python_build()
def get_python_version():
"""Return a string containing the major and minor Python version,
leaving off the patchlevel. Sample return values could be '1.5'
or '2.2'.
"""
return sys.version[:3]
def get_python_inc(plat_specific=0, prefix=None):
"""Return the directory containing installed Python header files.
If 'plat_specific' is false (the default), this is the path to the
non-platform-specific header files, i.e. Python.h and so on;
otherwise, this is the path to platform-specific header files
(namely pyconfig.h).
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
if python_build:
buildir = os.path.dirname(sys.executable)
if plat_specific:
# python.h is located in the buildir
inc_dir = buildir
else:
# the source dir is relative to the buildir
srcdir = os.path.abspath(os.path.join(buildir,
get_config_var('srcdir')))
# Include is located in the srcdir
inc_dir = os.path.join(srcdir, "Include")
return inc_dir
return os.path.join(prefix, "include", "python" + get_python_version())
elif os.name == "nt":
return os.path.join(prefix, "include")
elif os.name == "os2":
return os.path.join(prefix, "Include")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its C header files "
"on platform '%s'" % os.name)
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
libpython = os.path.join(prefix,
"lib", "python" + get_python_version())
if standard_lib:
return libpython
else:
return os.path.join(libpython, "site-packages")
elif os.name == "nt":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
if get_python_version() < "2.2":
return prefix
else:
return os.path.join(prefix, "Lib", "site-packages")
elif os.name == "os2":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
return os.path.join(prefix, "Lib", "site-packages")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its library "
"on platform '%s'" % os.name)
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
(cc, cxx, opt, cflags, ccshared, ldshared, so_ext) = \
get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SO')
if 'CC' in os.environ:
cc = os.environ['CC']
if 'CXX' in os.environ:
cxx = os.environ['CXX']
if 'LDSHARED' in os.environ:
ldshared = os.environ['LDSHARED']
if 'CPP' in os.environ:
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
cflags = opt + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc)
compiler.shared_lib_extension = so_ext
def get_config_h_filename():
"""Return full pathname of installed pyconfig.h file."""
if python_build:
if os.name == "nt":
inc_dir = os.path.join(project_base, "PC")
else:
inc_dir = project_base
else:
inc_dir = get_python_inc(plat_specific=1)
if get_python_version() < '2.2':
config_h = 'config.h'
else:
# The name of the config.h file changed in 2.2
config_h = 'pyconfig.h'
return os.path.join(inc_dir, config_h)
def get_makefile_filename():
"""Return full pathname of installed Makefile from the Python build."""
if python_build:
return os.path.join(os.path.dirname(sys.executable), "Makefile")
lib_dir = get_python_lib(plat_specific=1, standard_lib=1)
return os.path.join(lib_dir, "config", "Makefile")
def parse_config_h(fp, g=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if g is None:
g = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
#
while 1:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
g[n] = v
else:
m = undef_rx.match(line)
if m:
g[m.group(1)] = 0
return g
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
def parse_makefile(fn, g=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
from distutils.text_file import TextFile
fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1)
if g is None:
g = {}
done = {}
notdone = {}
while 1:
line = fp.readline()
if line is None: # eof
break
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
while notdone:
for name in notdone.keys():
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
del notdone[name]
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
fp.close()
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
g.update(done)
return g
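# Editor's usage sketch (not part of distutils): parse_makefile() on a tiny
# Makefile-style file; the file name and contents are assumptions.
#
#   # given a file 'vars.mk' containing:
#   #   prefix = /usr/local
#   #   bindir = $(prefix)/bin
#   parse_makefile('vars.mk')   # -> {'prefix': '/usr/local',
#                               #     'bindir': '/usr/local/bin'}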
def expand_makefile_vars(s, vars):
"""Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
'string' according to 'vars' (a dictionary mapping variable names to
values). Variables not present in 'vars' are silently expanded to the
empty string. The variable values in 'vars' should not contain further
variable expansions; if 'vars' is the output of 'parse_makefile()',
you're fine. Returns a variable-expanded version of 's'.
"""
# This algorithm does multiple expansion, so if vars['foo'] contains
# "${bar}", it will expand ${foo} to ${bar}, and then expand
# ${bar}... and so forth. This is fine as long as 'vars' comes from
# 'parse_makefile()', which takes care of such expansions eagerly,
# according to make's variable expansion semantics.
while 1:
m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
if m:
(beg, end) = m.span()
s = s[0:beg] + vars.get(m.group(1)) + s[end:]
else:
break
return s
_config_vars = None
def _init_posix():
"""Initialize the module as appropriate for POSIX systems."""
g = {}
# load the installed Makefile:
try:
filename = get_makefile_filename()
parse_makefile(filename, g)
except IOError, msg:
my_msg = "invalid Python installation: unable to open %s" % filename
if hasattr(msg, "strerror"):
my_msg = my_msg + " (%s)" % msg.strerror
raise DistutilsPlatformError(my_msg)
# load the installed pyconfig.h:
try:
filename = get_config_h_filename()
parse_config_h(file(filename), g)
except IOError, msg:
my_msg = "invalid Python installation: unable to open %s" % filename
if hasattr(msg, "strerror"):
my_msg = my_msg + " (%s)" % msg.strerror
raise DistutilsPlatformError(my_msg)
# On MacOSX we need to check the setting of the environment variable
# MACOSX_DEPLOYMENT_TARGET: configure bases some choices on it so
# it needs to be compatible.
# If it isn't set we set it to the configure-time value
if sys.platform == 'darwin' and 'MACOSX_DEPLOYMENT_TARGET' in g:
cfg_target = g['MACOSX_DEPLOYMENT_TARGET']
cur_target = os.getenv('MACOSX_DEPLOYMENT_TARGET', '')
if cur_target == '':
cur_target = cfg_target
os.putenv('MACOSX_DEPLOYMENT_TARGET', cfg_target)
elif map(int, cfg_target.split('.')) > map(int, cur_target.split('.')):
my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" during configure'
% (cur_target, cfg_target))
raise DistutilsPlatformError(my_msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if python_build:
g['LDSHARED'] = g['BLDSHARED']
elif get_python_version() < '2.1':
# The following two branches are for 1.5.2 compatibility.
if sys.platform == 'aix4': # what about AIX 3.x ?
# Linker script is in the config directory, not in Modules as the
# Makefile says.
python_lib = get_python_lib(standard_lib=1)
ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
python_exp = os.path.join(python_lib, 'config', 'python.exp')
g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp)
elif sys.platform == 'beos':
# Linker script is in the config directory. In the Makefile it is
# relative to the srcdir, which after installation no longer makes
# sense.
python_lib = get_python_lib(standard_lib=1)
linkerscript_path = string.split(g['LDSHARED'])[0]
linkerscript_name = os.path.basename(linkerscript_path)
linkerscript = os.path.join(python_lib, 'config',
linkerscript_name)
# XXX this isn't the right place to do this: adding the Python
# library to the link, if needed, should be in the "build_ext"
# command. (It's also needed for non-MS compilers on Windows, and
# it's taken care of for them by the 'build_ext.get_libraries()'
# method.)
g['LDSHARED'] = ("%s -L%s/lib -lpython%s" %
(linkerscript, PREFIX, get_python_version()))
global _config_vars
_config_vars = g
def _init_nt():
"""Initialize the module as appropriate for NT"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['SO'] = '.pyd'
g['EXE'] = ".exe"
g['VERSION'] = get_python_version().replace(".", "")
g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable))
global _config_vars
_config_vars = g
def _init_os2():
"""Initialize the module as appropriate for OS/2"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['SO'] = '.pyd'
g['EXE'] = ".exe"
global _config_vars
_config_vars = g
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform. Generally this includes
everything needed to build extensions and install both pure modules and
extensions. On Unix, this means every variable defined in Python's
installed Makefile; on Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _config_vars
if _config_vars is None:
func = globals().get("_init_" + os.name)
if func:
func()
else:
_config_vars = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_config_vars['prefix'] = PREFIX
_config_vars['exec_prefix'] = EXEC_PREFIX
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _config_vars[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_config_vars[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _config_vars[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_config_vars[key] = flags
# If we're on OSX 10.5 or later and the user tries to
# compiles an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail.
#
# The major usecase for this is users using a Python.org
# binary installer on OSX 10.6: that installer uses
# the 10.4u SDK, but that SDK is not installed by default
# when you install Xcode.
#
m = re.search('-isysroot\s+(\S+)', _config_vars['CFLAGS'])
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _config_vars[key]
flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
_config_vars[key] = flags
if args:
vals = []
for name in args:
vals.append(_config_vars.get(name))
return vals
else:
return _config_vars
def get_config_var(name):
"""Return the value of a single variable using the dictionary
returned by 'get_config_vars()'. Equivalent to
get_config_vars().get(name)
"""
return get_config_vars().get(name)
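# Editor's usage sketch (not part of distutils): looking up build-time
# configuration values; which variables exist is platform dependent, so the
# names below are common examples rather than guarantees.
#
#   get_config_var('SO')            # extension-module suffix, e.g. '.so' or '.pyd'
#   get_config_vars('CC', 'OPT')    # list of values, one per requested name
#   get_config_vars()['prefix']     # same as the PREFIX constant above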
| gpl-3.0 |
creighton/ADL_LRS | oauth_provider/models.py | 3 | 6783 | import uuid
import urllib
import urlparse
from datetime import datetime
from time import time
import oauth2 as oauth
from Crypto.PublicKey import RSA
from django.db import models
from oauth_provider.compat import AUTH_USER_MODEL, get_random_string
from oauth_provider.managers import TokenManager
from oauth_provider.consts import KEY_SIZE, RSA_SECRET_SIZE, CONSUMER_KEY_SIZE, CONSUMER_STATES,\
PENDING, VERIFIER_SIZE, MAX_URL_LENGTH, OUT_OF_BAND, REGULAR_SECRET_SIZE
from oauth_provider.utils import check_valid_callback
class Nonce(models.Model):
token_key = models.CharField(max_length=KEY_SIZE)
consumer_key = models.CharField(max_length=CONSUMER_KEY_SIZE)
key = models.CharField(max_length=255)
timestamp = models.PositiveIntegerField(db_index=True)
def __unicode__(self):
return u"Nonce %s for %s" % (self.key, self.consumer_key)
# LRS CHANGE - NOT NEEDED
# class Scope(models.Model):
# name = models.CharField(max_length=255)
# url = models.TextField(max_length=MAX_URL_LENGTH)
# is_readonly = models.BooleanField(default=True)
# def __unicode__(self):
# return u"Resource %s with url %s" % (self.name, self.url)
# LRS CHANGE - NOT NEEDED
# class Resource(Scope):
# def __init__(self, *args, **kwargs):
# warnings.warn("oauth_provider.Resource model is deprecated, use oauth_provider.Scope instead", DeprecationWarning)
# super(Resource, self).__init__(*args, **kwargs)
# class Meta:
# proxy = True
class Consumer(models.Model):
name = models.CharField(max_length=255)
description = models.TextField(blank=True)
# LRS CHANGE - ADDED DEFAULT SCOPES FOR CONSUMER WHEN FIRST REGISTERED
# default_scopes = models.CharField(max_length=100, default="statements/write statements/read/mine")
key = models.CharField(max_length=CONSUMER_KEY_SIZE)
secret = models.CharField(max_length=RSA_SECRET_SIZE, blank=True)
rsa_signature = models.BooleanField(default=False)
status = models.SmallIntegerField(choices=CONSUMER_STATES, default=PENDING)
user = models.ForeignKey(AUTH_USER_MODEL, null=True, blank=True)
xauth_allowed = models.BooleanField("Allow xAuth", default=False)
def __unicode__(self):
return u"Consumer %s with key %s" % (self.name, self.key)
def generate_random_codes(self):
"""
Used to generate random key/secret pairings.
Use this after you've added the other data in place of save().
"""
self.key = uuid.uuid4().hex
# LRS CHANGE - KEPT THE SECRET KEY AT 16 LIKE BEFORE (WHEN NOT USING
# RSA)
if not self.rsa_signature:
self.secret = get_random_string(length=REGULAR_SECRET_SIZE)
self.save()
def generate_rsa_key(self):
if not self.secret or len(self.secret) == REGULAR_SECRET_SIZE:
return None
return RSA.importKey(self.secret)
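# Editor's illustrative sketch (not part of this app): registering a consumer
# and letting the model generate its key/secret pair; the field values and the
# `some_user` object are assumptions for demonstration only.
#
#   consumer = Consumer(name="Example client", description="demo", user=some_user)
#   consumer.generate_random_codes()    # sets key (uuid hex) and, for non-RSA
#                                       # consumers, a random secret, then saves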
class Token(models.Model):
REQUEST = 1
ACCESS = 2
TOKEN_TYPES = ((REQUEST, u'Request'), (ACCESS, u'Access'))
key = models.CharField(max_length=KEY_SIZE, null=True, blank=True)
secret = models.CharField(
max_length=RSA_SECRET_SIZE, null=True, blank=True)
token_type = models.SmallIntegerField(choices=TOKEN_TYPES)
timestamp = models.IntegerField(default=long(time()))
is_approved = models.BooleanField(default=False)
user = models.ForeignKey(AUTH_USER_MODEL, null=True,
blank=True, related_name='tokens')
consumer = models.ForeignKey(Consumer)
# LRS CHANGE - LRS SCOPES AREN'T RESOURCES
# scope = models.ForeignKey(Scope, null=True, blank=True)
scope = models.CharField(
max_length=100, default="statements/write statements/read/mine")
@property
def resource(self):
return self.scope
@resource.setter
def resource(self, value):
self.scope = value
# OAuth 1.0a stuff
verifier = models.CharField(max_length=VERIFIER_SIZE)
callback = models.CharField(
max_length=MAX_URL_LENGTH, null=True, blank=True)
callback_confirmed = models.BooleanField(default=False)
objects = TokenManager()
def __unicode__(self):
return u"%s Token %s for %s" % (self.get_token_type_display(), self.key, self.consumer)
def to_string(self, only_key=False):
token_dict = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
'oauth_callback_confirmed': self.callback_confirmed and 'true' or 'error'
}
if self.verifier:
token_dict['oauth_verifier'] = self.verifier
if only_key:
del token_dict['oauth_token_secret']
del token_dict['oauth_callback_confirmed']
return urllib.urlencode(token_dict)
def generate_random_codes(self):
"""
Used to generate random key/secret pairings.
Use this after you've added the other data in place of save().
"""
self.key = uuid.uuid4().hex
if not self.consumer.rsa_signature:
self.secret = get_random_string(length=REGULAR_SECRET_SIZE)
self.save()
def get_callback_url(self, args=None):
"""
OAuth 1.0a, append the oauth_verifier.
"""
if self.callback and self.verifier:
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
# workaround for non-http scheme urlparse problem in py2.6 (issue
# #2)
if "?" in path:
query = "%s&%s" % (path.split("?")[-1], query)
path = "?".join(path[:-1])
if args is not None:
query += "&%s" % urllib.urlencode(args)
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
args = args is not None and "?%s" % urllib.urlencode(args) or ""
return self.callback and self.callback + args
def set_callback(self, callback):
if callback != OUT_OF_BAND: # out of band, says "we can't do this!"
if check_valid_callback(callback):
self.callback = callback
self.callback_confirmed = True
self.save()
else:
raise oauth.Error('Invalid callback URL.')
# LRS CHANGE - ADDED HELPER FUNCTIONS
def scope_to_list(self):
return self.scope.split(" ")
def timestamp_asdatetime(self):
return datetime.fromtimestamp(self.timestamp)
def key_partial(self):
return self.key[:10]
| apache-2.0 |
saisrisathya/whatsapps | build/lib/yowsup/layers/protocol_groups/protocolentities/iq_groups_create_success.py | 41 | 1076 | from yowsup.structs import ProtocolTreeNode
from yowsup.layers.protocol_iq.protocolentities import ResultIqProtocolEntity
class SuccessCreateGroupsIqProtocolEntity(ResultIqProtocolEntity):
'''
<iq type="result" id="{{id}}" from="g.us">
<group id="{group_id}"></group>
</iq>
'''
def __init__(self, _id, groupId):
super(SuccessCreateGroupsIqProtocolEntity, self).__init__(_from = "g.us", _id = _id)
self.setProps(groupId)
def setProps(self, groupId):
self.groupId = groupId
def toProtocolTreeNode(self):
node = super(SuccessCreateGroupsIqProtocolEntity, self).toProtocolTreeNode()
node.addChild(ProtocolTreeNode("group",{"id": self.groupId}))
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = super(SuccessCreateGroupsIqProtocolEntity, SuccessCreateGroupsIqProtocolEntity).fromProtocolTreeNode(node)
entity.__class__ = SuccessCreateGroupsIqProtocolEntity
entity.setProps(node.getChild("group").getAttributeValue("id"))
return entity
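# Editor's illustrative sketch (not part of yowsup): building the entity and
# serializing it back to the XML shown in the class docstring; the id and
# group id values are made up for demonstration.
#
#   entity = SuccessCreateGroupsIqProtocolEntity("msg-1", "123456789-987654321")
#   node = entity.toProtocolTreeNode()   # <iq type="result" ...><group id=.../></iq>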
| gpl-3.0 |
jirikuncar/invenio-base | invenio_base/config.py | 1 | 28704 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Default configuration values."""
from __future__ import unicode_literals
import distutils.sysconfig
from os.path import join
from invenio.utils.shell import which
from invenio.version import __version__
EXTENSIONS = [
'invenio.ext.confighacks',
'invenio.ext.passlib:Passlib',
'invenio.ext.debug_toolbar',
'invenio.ext.babel',
'invenio.ext.sqlalchemy',
'invenio.ext.sslify',
'invenio.ext.cache',
'invenio.ext.session',
'invenio.ext.login',
'invenio.ext.principal',
'invenio.ext.email',
'invenio.ext.fixtures', # before legacy
'invenio.ext.legacy',
'invenio.ext.assets',
'invenio.ext.template',
'invenio.ext.admin',
'invenio.ext.logging',
'invenio.ext.logging.backends.fs',
'invenio.ext.logging.backends.legacy',
'invenio.ext.logging.backends.sentry',
'invenio.ext.gravatar',
'invenio.ext.collect',
'invenio.ext.restful',
'invenio.ext.menu',
'invenio.ext.jasmine', # after assets
'flask_breadcrumbs:Breadcrumbs',
'invenio_deposit.url_converters',
# TODO 'invenio.ext.iiif',
'invenio.ext.es',
]
PACKAGES = [
'invenio_records',
'invenio_search',
'invenio_comments',
'invenio_collections',
'invenio_documents',
'invenio_pidstore',
'invenio.modules.*',
'invenio_formatter',
'invenio_unapi',
'invenio_webhooks',
'invenio_deposit',
'invenio_workflows',
'invenio_knowledge',
'invenio_oauthclient',
'invenio_oauth2server',
'invenio_previewer',
# TODO 'invenio_messages',
'invenio_groups',
'invenio_access',
'invenio_accounts',
'invenio_upgrader',
'invenio.base',
]
PACKAGES_EXCLUDE = [
'invenio.modules.access',
'invenio.modules.accounts',
'invenio.modules.annotations',
'invenio.modules.archiver',
'invenio.modules.cloudconnector',
'invenio.modules.comments',
'invenio.modules.collections',
'invenio.modules.communities',
'invenio.modules.deposit',
'invenio.modules.documentation',
'invenio.modules.documents',
'invenio.modules.formatter',
'invenio.modules.groups',
'invenio.modules.knowledge',
'invenio.modules.messages',
'invenio.modules.multimedia',
'invenio.modules.oaiharvester',
'invenio.modules.oauth2server',
'invenio.modules.oauthclient',
'invenio.modules.pages',
'invenio.modules.pidstore',
'invenio.modules.previewer',
'invenio.modules.records',
'invenio.modules.search',
'invenio.modules.tags',
'invenio.modules.unapi',
'invenio.modules.upgrader',
'invenio.modules.webhooks',
'invenio.modules.workflows',
]
LEGACY_WEBINTERFACE_EXCLUDE = []
_cfg_prefix = distutils.sysconfig.get_config_var("prefix")
CFG_DATADIR = join(_cfg_prefix, 'var', 'data')
CFG_BATCHUPLOADER_DAEMON_DIR = join(_cfg_prefix, "var", "batchupload")
CFG_BATCHUPLOADER_DAEMON_DIR = \
CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or _cfg_prefix + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
CFG_BIBDOCFILE_FILEDIR = join(CFG_DATADIR, "files")
CFG_BINDIR = join(_cfg_prefix, "bin")
CFG_ETCDIR = join(_cfg_prefix, "etc")
CFG_CACHEDIR = join(_cfg_prefix, "var", "cache")
CFG_LOGDIR = join(_cfg_prefix, "var", "log")
CFG_RUNDIR = join(_cfg_prefix, "var", "run")
CFG_TMPDIR = join(_cfg_prefix, "var", "tmp")
CFG_WEBDIR = join(_cfg_prefix, "var", "www")
CFG_PYLIBDIR = join(_cfg_prefix, "lib", "python")
CFG_LOCALEDIR = join(_cfg_prefix, "share", "locale")
CFG_TMPSHAREDDIR = join(_cfg_prefix, "var", "tmp-shared")
CFG_COMMENTSDIR = join(CFG_DATADIR, "comments")
# FIXME check the usage and replace by SQLALCHEMY_URL
CFG_DATABASE_HOST = "localhost"
CFG_DATABASE_NAME = "invenio"
CFG_DATABASE_PASS = "my123p$ss"
CFG_DATABASE_PORT = 3306
CFG_DATABASE_SLAVE = None
CFG_DATABASE_TYPE = "mysql"
CFG_DATABASE_USER = "invenio"
# CFG_FLASK_CACHE_TYPE has been deprecated.
CACHE_TYPE = "redis"
REQUIREJS_CONFIG = "js/build.js"
# DO NOT EDIT THIS FILE! IT WAS AUTOMATICALLY GENERATED
# FROM INVENIO.CONF BY EXECUTING:
# inveniocfg --update-all
CFG_SITE_NAME_INTL = {}
CFG_SITE_NAME_INTL['af'] = "Atlantis Instituut van Fiktiewe Wetenskap"
CFG_SITE_NAME_INTL['ar'] = "معهد أطلنطيس للعلوم الافتراضية"
CFG_SITE_NAME_INTL['bg'] = "Институт за фиктивни науки Атлантис"
CFG_SITE_NAME_INTL['ca'] = "Institut Atlantis de Ciència Fictícia"
CFG_SITE_NAME_INTL['cs'] = "Atlantis Institut Fiktivních Věd"
CFG_SITE_NAME_INTL['de'] = "Atlantis Institut der fiktiven Wissenschaft"
CFG_SITE_NAME_INTL['el'] = "Ινστιτούτο Φανταστικών Επιστημών Ατλαντίδος"
CFG_SITE_NAME_INTL['en'] = "Atlantis Institute of Fictive Science"
CFG_SITE_NAME_INTL['es'] = "Atlantis Instituto de la Ciencia Fictive"
CFG_SITE_NAME_INTL['fr'] = "Atlantis Institut des Sciences Fictives"
CFG_SITE_NAME_INTL['hr'] = "Institut Fiktivnih Znanosti Atlantis"
CFG_SITE_NAME_INTL['gl'] = "Instituto Atlantis de Ciencia Fictive"
CFG_SITE_NAME_INTL['ka'] = "ატლანტიდის ფიქტიური მეცნიერების ინსტიტუტი"
CFG_SITE_NAME_INTL['it'] = "Atlantis Istituto di Scienza Fittizia"
CFG_SITE_NAME_INTL['rw'] = "Atlantis Ishuri Rikuru Ry'ubuhanga"
CFG_SITE_NAME_INTL['lt'] = "Fiktyvių Mokslų Institutas Atlantis"
CFG_SITE_NAME_INTL['hu'] = "Kitalált Tudományok Atlantiszi Intézete"
CFG_SITE_NAME_INTL['ja'] = "Fictive 科学のAtlantis の協会"
CFG_SITE_NAME_INTL['no'] = "Atlantis Institutt for Fiktiv Vitenskap"
CFG_SITE_NAME_INTL['pl'] = "Instytut Fikcyjnej Nauki Atlantis"
CFG_SITE_NAME_INTL['pt'] = "Instituto Atlantis de Ciência Fictícia"
CFG_SITE_NAME_INTL['ro'] = "Institutul Atlantis al Ştiinţelor Fictive"
CFG_SITE_NAME_INTL['ru'] = "Институт Фиктивных Наук Атлантиды"
CFG_SITE_NAME_INTL['sk'] = "Atlantis Inštitút Fiktívnych Vied"
CFG_SITE_NAME_INTL['sv'] = "Atlantis Institut för Fiktiv Vetenskap"
CFG_SITE_NAME_INTL['uk'] = "Інститут вигаданих наук в Атлантісі"
CFG_SITE_NAME_INTL['zh_CN'] = "阿特兰提斯虚拟科学学院"
CFG_SITE_NAME_INTL['zh_TW'] = "阿特蘭提斯虛擬科學學院"
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS = 0
CFG_ACCESS_CONTROL_LEVEL_GUESTS = 0
CFG_ACCESS_CONTROL_LEVEL_SITE = 0
CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN = ""
CFG_ACCESS_CONTROL_NOTIFY_ADMIN_ABOUT_NEW_ACCOUNTS = 0
CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_ACTIVATION = 0
CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_DELETION = 0
CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT = 1
CFG_ADS_SITE = 0
CFG_APACHE_GROUP_FILE = "demo-site-apache-user-groups"
CFG_APACHE_PASSWORD_FILE = "demo-site-apache-user-passwords"
CFG_ARXIV_URL_PATTERN = "http://export.arxiv.org/pdf/%sv%s.pdf"
CFG_BATCHUPLOADER_FILENAME_MATCHING_POLICY = ['reportnumber', 'recid', ]
CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS = r"invenio_webupload|Invenio-.*"
CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS = {
'127.0.0.1': ['*'], # useful for testing
'127.0.1.1': ['*'], # useful for testing
'10.0.0.1': ['BOOK', 'REPORT'], # Example 1
'10.0.0.2': ['POETRY', 'PREPRINT'], # Example 2
}
CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL = "[email protected]"
CFG_BIBAUTHORID_ENABLED = True
CFG_BIBAUTHORID_EXTERNAL_CLAIMED_RECORDS_KEY = []
CFG_BIBAUTHORID_MAX_PROCESSES = 12
CFG_BIBAUTHORID_ON_AUTHORPAGES = True
CFG_BIBAUTHORID_PERSONID_SQL_MAX_THREADS = 12
CFG_BIBAUTHORID_UI_SKIP_ARXIV_STUB_PAGE = False
CFG_BIBAUTHORID_SEARCH_ENGINE_MAX_DATACHUNK_PER_INSERT_DB_QUERY = 10000000
CFG_BIBCATALOG_SYSTEM = "EMAIL"
CFG_BIBCATALOG_SYSTEM_EMAIL_ADDRESS = "[email protected]"
CFG_BIBCATALOG_SYSTEM_RT_CLI = "/usr/bin/rt"
CFG_BIBCATALOG_SYSTEM_RT_DEFAULT_PWD = ""
CFG_BIBCATALOG_SYSTEM_RT_DEFAULT_USER = ""
CFG_BIBCATALOG_SYSTEM_RT_URL = "http://localhost/rt3"
CFG_BIBCIRCULATION_ACQ_STATUS_CANCELLED = "cancelled"
CFG_BIBCIRCULATION_ACQ_STATUS_NEW = "new"
CFG_BIBCIRCULATION_ACQ_STATUS_ON_ORDER = "on order"
CFG_BIBCIRCULATION_ACQ_STATUS_PARTIAL_RECEIPT = "partial receipt"
CFG_BIBCIRCULATION_ACQ_STATUS_RECEIVED = "received"
CFG_BIBCIRCULATION_AMAZON_ACCESS_KEY = ""
CFG_BIBCIRCULATION_ILL_STATUS_CANCELLED = "cancelled"
CFG_BIBCIRCULATION_ILL_STATUS_NEW = "new"
CFG_BIBCIRCULATION_ILL_STATUS_ON_LOAN = "on loan"
CFG_BIBCIRCULATION_ILL_STATUS_RECEIVED = "received"
CFG_BIBCIRCULATION_ILL_STATUS_REQUESTED = "requested"
CFG_BIBCIRCULATION_ILL_STATUS_RETURNED = "returned"
CFG_BIBCIRCULATION_ITEM_STATUS_CANCELLED = "cancelled"
CFG_BIBCIRCULATION_ITEM_STATUS_CLAIMED = "claimed"
CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS = "in process"
CFG_BIBCIRCULATION_ITEM_STATUS_NOT_ARRIVED = "not arrived"
CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN = "on loan"
CFG_BIBCIRCULATION_ITEM_STATUS_ON_ORDER = "on order"
CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF = "on shelf"
CFG_BIBCIRCULATION_ITEM_STATUS_OPTIONAL = []
CFG_BIBCIRCULATION_ITEM_STATUS_UNDER_REVIEW = "under review"
CFG_BIBCIRCULATION_LIBRARY_TYPE_EXTERNAL = "external"
CFG_BIBCIRCULATION_LIBRARY_TYPE_HIDDEN = "hidden"
CFG_BIBCIRCULATION_LIBRARY_TYPE_INTERNAL = "internal"
CFG_BIBCIRCULATION_LIBRARY_TYPE_MAIN = "main"
CFG_BIBCIRCULATION_LOAN_STATUS_EXPIRED = "expired"
CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN = "on loan"
CFG_BIBCIRCULATION_LOAN_STATUS_RETURNED = "returned"
CFG_BIBCIRCULATION_PROPOSAL_STATUS_NEW = "proposal-new"
CFG_BIBCIRCULATION_PROPOSAL_STATUS_ON_ORDER = "proposal-on order"
CFG_BIBCIRCULATION_PROPOSAL_STATUS_PUT_ASIDE = "proposal-put aside"
CFG_BIBCIRCULATION_PROPOSAL_STATUS_RECEIVED = "proposal-received"
CFG_BIBCIRCULATION_REQUEST_STATUS_CANCELLED = "cancelled"
CFG_BIBCIRCULATION_REQUEST_STATUS_DONE = "done"
CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING = "pending"
CFG_BIBCIRCULATION_REQUEST_STATUS_PROPOSED = "proposed"
CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING = "waiting"
CFG_BIBDOCFILE_ADDITIONAL_KNOWN_FILE_EXTENSIONS = [
'hpg', 'link', 'lis', 'llb', 'mat', 'mpp', 'msg', 'docx', 'docm', 'xlsx',
'xlsm', 'xlsb', 'pptx', 'pptm', 'ppsx', 'ppsm', ]
CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES = {
"application/xml-dtd": ".dtd",
}
CFG_BIBDOCFILE_BEST_FORMATS_TO_EXTRACT_TEXT_FROM = (
'txt', 'html', 'xml', 'odt', 'doc', 'docx', 'djvu', 'pdf', 'ps', 'ps.gz')
CFG_BIBDOCFILE_DESIRED_CONVERSIONS = {
'pdf': ('pdf;pdfa', ),
'ps.gz': ('pdf;pdfa', ),
'djvu': ('pdf', ),
'sxw': ('doc', 'odt', 'pdf;pdfa', ),
'docx': ('doc', 'odt', 'pdf;pdfa', ),
'doc': ('odt', 'pdf;pdfa', 'docx'),
'rtf': ('pdf;pdfa', 'odt', ),
'odt': ('pdf;pdfa', 'doc', ),
'pptx': ('ppt', 'odp', 'pdf;pdfa', ),
'ppt': ('odp', 'pdf;pdfa', 'pptx'),
'sxi': ('odp', 'pdf;pdfa', ),
'odp': ('pdf;pdfa', 'ppt', ),
'xlsx': ('xls', 'ods', 'csv'),
'xls': ('ods', 'csv'),
'ods': ('xls', 'xlsx', 'csv'),
'sxc': ('xls', 'xlsx', 'csv'),
'tiff': ('pdf;pdfa', ),
'tif': ('pdf;pdfa', ), }
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_DOCTYPES = [
('Main', 'Main document'),
('LaTeX', 'LaTeX'),
('Source', 'Source'),
('Additional', 'Additional File'),
('Audio', 'Audio file'),
('Video', 'Video file'),
('Script', 'Script'),
('Data', 'Data'),
('Figure', 'Figure'),
('Schema', 'Schema'),
('Graph', 'Graph'),
('Image', 'Image'),
('Drawing', 'Drawing'),
('Slides', 'Slides')]
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_MISC = {
'can_revise_doctypes': ['*'],
'can_comment_doctypes': ['*'],
'can_describe_doctypes': ['*'],
'can_delete_doctypes': ['*'],
'can_keep_doctypes': ['*'],
'can_rename_doctypes': ['*'],
'can_add_format_to_doctypes': ['*'],
'can_restrict_doctypes': ['*'],
}
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_RESTRICTIONS = [
('', 'Public'),
('restricted', 'Restricted')]
CFG_BIBDOCFILE_ENABLE_BIBDOCFSINFO_CACHE = 0
CFG_BIBDOCFILE_FILESYSTEM_BIBDOC_GROUP_LIMIT = 5000
CFG_BIBDOCFILE_MD5_CHECK_PROBABILITY = 0.1
CFG_BIBDOCFILE_USE_XSENDFILE = 0
CFG_BIBDOCFILE_AFS_VOLUME_PATTERN = "p.invenio.%s"
CFG_BIBDOCFILE_AFS_VOLUME_QUOTA = 10000000
CFG_BIBDOCFILE_PREFERRED_MIMETYPES_MAPPING = {
'application/msword': '.doc',
'application/octet-stream': '.bin',
'application/postscript': '.ps',
'application/vnd.ms-excel': '.xls',
'application/vnd.ms-powerpoint': '.ppt',
'application/x-gtar-compressed': '.tgz',
'application/xhtml+xml': '.xhtml',
'application/xml': '.xml',
'audio/mpeg': '.mp3',
'audio/ogg': '.ogg',
'image/jpeg': '.jpeg',
'image/svg+xml': '.svg',
'image/tiff': '.tiff',
'message/rfc822': '.eml',
'text/calendar': '.ics',
'text/plain': '.txt',
'video/mpeg': '.mpeg',
}
CFG_BIBFIELD_MASTER_FORMATS = ['marc', ]
CFG_BIBFORMAT_ADDTHIS_ID = ""
CFG_BIBFORMAT_DISABLE_I18N_FOR_CACHED_FORMATS = []
CFG_BIBFORMAT_HIDDEN_FILE_FORMATS = []
CFG_BIBFORMAT_HIDDEN_TAGS = ['595', ]
CFG_BIBINDEX_AUTHOR_WORD_INDEX_EXCLUDE_FIRST_NAMES = False
CFG_BIBINDEX_CHARS_ALPHANUMERIC_SEPARATORS = \
r"[\!\"\#\$\%\&\'\(\)\*\+\,\-\.\/\:\;\<\=\>\?\@\[\\\]\^\_\`\{\|\}\~]"
CFG_BIBINDEX_CHARS_PUNCTUATION = r"[\.\,\:\;\?\!\"]"
CFG_BIBINDEX_FULLTEXT_INDEX_LOCAL_FILES_ONLY = 1
CFG_BIBINDEX_MIN_WORD_LENGTH = 0
CFG_BIBINDEX_PATH_TO_STOPWORDS_FILE = "etc/bibrank/stopwords.kb"
CFG_BIBINDEX_PERFORM_OCR_ON_DOCNAMES = r"scan-.*"
CFG_BIBINDEX_REMOVE_HTML_MARKUP = 0
CFG_BIBINDEX_REMOVE_LATEX_MARKUP = 0
CFG_BIBINDEX_REMOVE_STOPWORDS = 0
CFG_BIBINDEX_SPLASH_PAGES = {
"http://documents\.cern\.ch/setlink\?.*": ".*",
"http://ilcagenda\.linearcollider\.org/subContributionDisplay\.py\?.*|"
"http://ilcagenda\.linearcollider\.org/contributionDisplay\.py\?.*":
"http://ilcagenda\.linearcollider\.org/getFile\.py/access\?.*|"
"http://ilcagenda\.linearcollider\.org/materialDisplay\.py\?.*",
}
CFG_BIBINDEX_SYNONYM_KBRS = {
'global': ['INDEX-SYNONYM-TITLE', 'exact'],
'title': ['INDEX-SYNONYM-TITLE', 'exact'],
}
CFG_BIBINDEX_URLOPENER_PASSWORD = "mysuperpass"
CFG_BIBINDEX_URLOPENER_USERNAME = "mysuperuser"
CFG_BIBMATCH_FUZZY_EMPTY_RESULT_LIMIT = 1
CFG_BIBMATCH_FUZZY_MATCH_VALIDATION_LIMIT = 0.65
CFG_BIBMATCH_FUZZY_WORDLIMITS = {
'100__a': 2,
'245__a': 4
}
CFG_BIBMATCH_LOCAL_SLEEPTIME = 0.0
CFG_BIBMATCH_MATCH_VALIDATION_RULESETS = [
('default', [{'tags': '245__%,242__%',
'threshold': 0.8,
'compare_mode': 'lazy',
'match_mode': 'title',
'result_mode': 'normal'},
{'tags': '037__a,088__a',
'threshold': 1.0,
'compare_mode': 'lazy',
'match_mode': 'identifier',
'result_mode': 'final'},
{'tags': '100__a,700__a',
'threshold': 0.8,
'compare_mode': 'normal',
'match_mode': 'author',
'result_mode': 'normal'},
{'tags': '773__a',
'threshold': 1.0,
'compare_mode': 'lazy',
'match_mode': 'title',
'result_mode': 'normal'}]),
('980__ \$\$a(THESIS|Thesis)', [{'tags': '100__a',
'threshold': 0.8,
'compare_mode': 'strict',
'match_mode': 'author',
'result_mode': 'normal'},
{'tags': '700__a,701__a',
'threshold': 1.0,
'compare_mode': 'lazy',
'match_mode': 'author',
'result_mode': 'normal'},
{'tags': '100__a,700__a',
'threshold': 0.8,
'compare_mode': 'ignored',
'match_mode': 'author',
'result_mode': 'normal'}]),
('260__', [{'tags': '260__c',
'threshold': 0.8,
'compare_mode': 'lazy',
'match_mode': 'date',
'result_mode': 'normal'}]),
('0247_', [{'tags': '0247_a',
'threshold': 1.0,
'compare_mode': 'lazy',
'match_mode': 'identifier',
'result_mode': 'final'}]),
('020__', [{'tags': '020__a',
'threshold': 1.0,
'compare_mode': 'lazy',
'match_mode': 'identifier',
'result_mode': 'joker'}])
]
CFG_BIBMATCH_QUERY_TEMPLATES = {
'title': '[title]',
'title-author': '[title] [author]',
'reportnumber': 'reportnumber:[reportnumber]'
}
CFG_BIBMATCH_REMOTE_SLEEPTIME = 2.0
CFG_BIBMATCH_SEARCH_RESULT_MATCH_LIMIT = 15
CFG_BIBMATCH_MIN_VALIDATION_COMPARISONS = 2
CFG_BIBSCHED_EDITOR = which("vim")
CFG_BIBSCHED_GC_TASKS_OLDER_THAN = 30
CFG_BIBSCHED_GC_TASKS_TO_ARCHIVE = ['bibupload', ]
CFG_BIBSCHED_GC_TASKS_TO_REMOVE = [
'bibindex', 'bibreformat', 'webcoll', 'bibrank', 'inveniogc', ]
CFG_BIBSCHED_LOG_PAGER = which("less")
CFG_BIBSCHED_LOGDIR = join(_cfg_prefix, "var", "log", "bibsched")
CFG_BIBSCHED_MAX_ARCHIVED_ROWS_DISPLAY = 500
CFG_BIBSCHED_MAX_NUMBER_CONCURRENT_TASKS = 1
CFG_BIBSCHED_NODE_TASKS = {}
CFG_BIBSCHED_PROCESS_USER = ""
CFG_BIBSCHED_REFRESHTIME = 5
CFG_BIBSCHED_TASKLET_PACKAGES = [
'invenio.legacy.bibsched.tasklets',
]
CFG_BIBSCHED_NON_CONCURRENT_TASKS = []
CFG_BIBSCHED_FLUSH_LOGS = 0
CFG_BIBSCHED_INCOMPATIBLE_TASKS = ()
CFG_BIBSCHED_NEVER_STOPS = 0
CFG_BIBUPLOAD_CONFLICTING_REVISION_TICKET_QUEUE = ""
CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS = ['6531_9', ]
CFG_BIBUPLOAD_DELETE_FORMATS = ['hb', 'recjson']
CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS = 0
CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG = "035__9"
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG = "035__a"
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG = "970__a"
CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS = [
('http(s)?://.*', {}),
]
CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS = ['/tmp', '/home', '/Users']
CFG_BIBUPLOAD_REFERENCE_TAG = "999"
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE = 1
CFG_BIBUPLOAD_STRONG_TAGS = ['964', ]
CFG_BIBUPLOAD_INTERNAL_DOI_PATTERN = "[^\w\W]"
CFG_BIBUPLOAD_MATCH_DELETED_RECORDS = 1
CFG_BIBWORKFLOW_WORKER = "worker_celery"
CFG_BROKER_URL = "amqp://guest@localhost:5672//"
CFG_CELERY_RESULT_BACKEND = "amqp"
CFG_CERN_SITE = 0
CFG_ORGANIZATION_IDENTIFIER = ""
CFG_CROSSREF_EMAIL = ""
CFG_CROSSREF_PASSWORD = ""
CFG_CROSSREF_USERNAME = ""
CFG_DEVEL_SITE = 0
CFG_DEVEL_TEST_DATABASE_ENGINES = {}
CFG_DEVEL_TOOLS = []
CFG_EMAIL_BACKEND = "flask_email.backends.smtp.Mail"
CFG_ERRORLIB_RESET_EXCEPTION_NOTIFICATION_COUNTER_AFTER = 14400
CFG_FLASK_DISABLED_BLUEPRINTS = []
CFG_ICON_CREATION_FORMAT_MAPPINGS = {'*': ['jpg']}
CFG_INSPIRE_SITE = 0
CFG_INTBITSET_ENABLE_SANITY_CHECKS = False
CFG_JSTESTDRIVER_PORT = 9876
CFG_MATHJAX_HOSTING = "local"
CFG_MATHJAX_RENDERS_MATHML = True
CFG_MISCUTIL_DEFAULT_PROCESS_TIMEOUT = 300
CFG_MISCUTIL_SMTP_HOST = "localhost"
CFG_MISCUTIL_SMTP_PASS = ""
CFG_MISCUTIL_SMTP_PORT = 25
CFG_MISCUTIL_SMTP_TLS = False
CFG_MISCUTIL_SMTP_USER = ""
CFG_MISCUTIL_SQL_RUN_SQL_MANY_LIMIT = 10000
CFG_MISCUTIL_SQL_USE_SQLALCHEMY = False
CFG_OAI_DELETED_POLICY = "persistent"
CFG_OAI_EXPIRE = 90000
CFG_OAI_FAILED_HARVESTING_EMAILS_ADMIN = True
CFG_OAI_FAILED_HARVESTING_STOP_QUEUE = 1
CFG_OAI_FRIENDS = ['http://cds.cern.ch/oai2d',
'http://openaire.cern.ch/oai2d',
'http://export.arxiv.org/oai2',
]
CFG_OAI_ID_FIELD = "909COo"
CFG_OAI_ID_PREFIX = "atlantis.cern.ch"
CFG_OAI_IDENTIFY_DESCRIPTION = """<description>
<eprints xmlns="http://www.openarchives.org/OAI/1.1/eprints"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/1.1/eprints
http://www.openarchives.org/OAI/1.1/eprints.xsd">
<content>
<URL>http://localhost</URL>
</content>
<metadataPolicy>
<text>Free and unlimited use by anybody with obligation to refer to original \
record</text>
</metadataPolicy>
<dataPolicy>
<text>Full content, i.e. preprints may not be harvested by robots</text>
</dataPolicy>
<submissionPolicy>
<text>Submission restricted. Submitted documents are subject of approval by \
OAI repository admins.</text>
</submissionPolicy>
</eprints>
</description>"""
CFG_OAI_LICENSE_FIELD = "540__"
CFG_OAI_LICENSE_PUBLISHER_SUBFIELD = "b"
CFG_OAI_LICENSE_TERMS_SUBFIELD = "a"
CFG_OAI_LICENSE_URI_SUBFIELD = "u"
CFG_OAI_LOAD = 500
CFG_OAI_METADATA_FORMATS = {
'oai_dc': ('XOAIDC', 'http://www.openarchives.org/OAI/1.1/dc.xsd',
'http://purl.org/dc/elements/1.1/'),
'marcxml': ('XOAIMARC',
'http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd',
'http://www.loc.gov/MARC21/slim'),
}
CFG_OAI_PREVIOUS_SET_FIELD = "909COq"
CFG_OAI_PROVENANCE_ALTERED_SUBFIELD = "t"
CFG_OAI_PROVENANCE_BASEURL_SUBFIELD = "u"
CFG_OAI_PROVENANCE_DATESTAMP_SUBFIELD = "d"
CFG_OAI_PROVENANCE_HARVESTDATE_SUBFIELD = "h"
CFG_OAI_PROVENANCE_METADATANAMESPACE_SUBFIELD = "m"
CFG_OAI_PROVENANCE_ORIGINDESCRIPTION_SUBFIELD = "d"
CFG_OAI_RIGHTS_CONTACT_SUBFIELD = "e"
CFG_OAI_RIGHTS_DATE_SUBFIELD = "g"
CFG_OAI_RIGHTS_FIELD = "542__"
CFG_OAI_RIGHTS_HOLDER_SUBFIELD = "d"
CFG_OAI_RIGHTS_STATEMENT_SUBFIELD = "f"
CFG_OAI_RIGHTS_URI_SUBFIELD = "u"
CFG_OAI_SAMPLE_IDENTIFIER = "oai:atlantis.cern.ch:123"
CFG_OAI_SET_FIELD = "909COp"
CFG_OAI_SLEEP = 2
CFG_OPENOFFICE_SERVER_HOST = "localhost"
CFG_OPENOFFICE_SERVER_PORT = 2002
CFG_OPENOFFICE_USER = "nobody"
CFG_PATH_ANY2DJVU = ""
CFG_PATH_CONVERT = which("convert")
CFG_PATH_DJVUPS = ""
CFG_PATH_DJVUTXT = ""
CFG_PATH_FFMPEG = ""
CFG_PATH_FFPROBE = ""
CFG_PATH_GFILE = which("file")
CFG_PATH_GIT = which("git")
CFG_PATH_GS = which("gs")
CFG_PATH_GUNZIP = which("gunzip")
CFG_PATH_GZIP = which("gzip")
CFG_PATH_MD5SUM = ""
CFG_PATH_MEDIAINFO = ""
CFG_PATH_MYSQL = which("mysql")
CFG_PATH_OCROSCRIPT = ""
CFG_PATH_OPENOFFICE_PYTHON = which("python")
CFG_PATH_PAMFILE = which("pdftoppm")
CFG_PATH_PDF2PS = which("pdf2ps")
CFG_PATH_PDFINFO = which("pdfinfo")
CFG_PATH_PDFLATEX = which("pdflatex")
CFG_PATH_PDFOPT = which("pdfopt") or which("cp")
CFG_PATH_PDFTK = which("pdftk")
CFG_PATH_PDFTOPPM = which("pdftoppm")
CFG_PATH_PDFTOPS = which("pdftops")
CFG_PATH_PDFTOTEXT = which("pdftotext")
CFG_PATH_PHP = which("php")
CFG_PATH_PS2PDF = which("ps2pdf")
CFG_PATH_PSTOASCII = which("ps2ascii")
CFG_PATH_PSTOTEXT = ""
CFG_PATH_SVN = which("svn")
CFG_PATH_TAR = which("tar")
CFG_PATH_TIFF2PDF = which("tiff2pdf")
CFG_PATH_WGET = which("wget")
CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT = 750
CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT = 2
CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT = 75
CFG_PLOTEXTRACTOR_DISALLOWED_TEX = [
'begin', 'end', 'section', 'includegraphics', 'caption',
'acknowledgements',
]
CFG_PLOTEXTRACTOR_DOWNLOAD_TIMEOUT = 2.0
CFG_PLOTEXTRACTOR_SOURCE_BASE_URL = "http://arxiv.org/"
CFG_PLOTEXTRACTOR_SOURCE_PDF_FOLDER = "pdf/"
CFG_PLOTEXTRACTOR_SOURCE_TARBALL_FOLDER = "e-print/"
CFG_REDIS_HOSTS = {'default': [{'db': 0, 'host': '127.0.0.1', 'port': 6379}]}
CFG_REFEXTRACT_KBS_OVERRIDE = {}
CFG_REFEXTRACT_TICKET_QUEUE = None
CFG_SCOAP3_SITE = 0
CFG_SITE_ADMIN_EMAIL = "[email protected]"
CFG_SITE_ADMIN_EMAIL_EXCEPTIONS = 1
CFG_SITE_EMERGENCY_EMAIL_ADDRESSES = {}
CFG_SITE_LANG = "en"
CFG_SITE_LANGS = ['af', 'ar', 'bg', 'ca', 'cs', 'de', 'el', 'en', 'es', 'fr',
'hr', 'gl', 'ka', 'it', 'rw', 'lt', 'hu', 'ja', 'no', 'pl',
'pt', 'ro', 'ru', 'sk', 'sv', 'uk', 'zh_CN', 'zh_TW', ]
CFG_SITE_NAME = "Atlantis Institute of Fictive Science"
CFG_SITE_RECORD = "record"
SECRET_KEY = "change_me"
CFG_SITE_SECURE_URL = "http://localhost:4000"
CFG_SITE_SUPPORT_EMAIL = "[email protected]"
CFG_SITE_URL = "http://localhost:4000"
CFG_VERSION = __version__
CFG_WEB_API_KEY_ALLOWED_URL = []
CFG_WEBALERT_ALERT_ENGINE_EMAIL = "[email protected]"
CFG_WEBALERT_MAX_NUM_OF_CHARS_PER_LINE_IN_ALERT_EMAIL = 72
CFG_WEBALERT_MAX_NUM_OF_RECORDS_IN_ALERT_EMAIL = 20
CFG_WEBALERT_SEND_EMAIL_NUMBER_OF_TRIES = 3
CFG_WEBALERT_SEND_EMAIL_SLEEPTIME_BETWEEN_TRIES = 300
CFG_WEBAUTHORPROFILE_CACHE_EXPIRED_DELAY_BIBSCHED = 5
CFG_WEBAUTHORPROFILE_CACHE_EXPIRED_DELAY_LIVE = 7
CFG_WEBAUTHORPROFILE_MAX_AFF_LIST = 100
CFG_WEBAUTHORPROFILE_MAX_COAUTHOR_LIST = 100
CFG_WEBAUTHORPROFILE_MAX_COLLAB_LIST = 100
CFG_WEBAUTHORPROFILE_MAX_HEP_CHOICES = 10
CFG_WEBAUTHORPROFILE_MAX_KEYWORD_LIST = 100
CFG_WEBAUTHORPROFILE_USE_BIBAUTHORID = False
CFG_WEBAUTHORPROFILE_ALLOWED_FIELDCODES = [
'Astrophysics', 'Accelerators', 'Computing', 'Experiment-HEP',
'Gravitation and Cosmology', 'Instrumentation', 'Lattice',
'Math and Math Physics', 'Theory-Nucl', 'Other', 'Phenomenology-HEP',
'General Physics', 'Theory-HEP', 'Experiment-Nucl'
]
CFG_WEBAUTHORPROFILE_CFG_HEPNAMES_EMAIL = "[email protected]"
CFG_WEBAUTHORPROFILE_MAX_FIELDCODE_LIST = 100
CFG_WEBAUTHORPROFILE_ORCID_ENDPOINT_PUBLIC = "http://pub.orcid.org/"
CFG_WEBAUTHORPROFILE_ORCID_ENDPOINT_MEMBER = "http://api.orcid.org/"
CFG_WEBAUTHORPROFILE_USE_ALLOWED_FIELDCODES = True
CFG_WEBDEPOSIT_UPLOAD_FOLDER = "var/tmp/webdeposit_uploads"
CFG_WEBMESSAGE_DAYS_BEFORE_DELETE_ORPHANS = 60
CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES = 30
CFG_WEBMESSAGE_MAX_SIZE_OF_MESSAGE = 20000
CFG_WEBSEARCH_ADVANCEDSEARCH_PATTERN_BOX_WIDTH = 30
CFG_WEBSEARCH_AUTHOR_ET_AL_THRESHOLD = 3
CFG_WEBSEARCH_CALL_BIBFORMAT = 0
CFG_WEBSEARCH_CITESUMMARY_SELFCITES_THRESHOLD = 2000
CFG_WEBSEARCH_CITESUMMARY_SCAN_THRESHOLD = 20000
CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX = 1
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS = 10
CFG_WEBSEARCH_DETAILED_META_FORMAT = "hdm"
CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS = 1
CFG_WEBSEARCH_ENABLE_GOOGLESCHOLAR = True
CFG_WEBSEARCH_ENABLE_OPENGRAPH = False
CFG_WEBSEARCH_EXTERNAL_COLLECTION_SEARCH_MAXRESULTS = 10
CFG_WEBSEARCH_EXTERNAL_COLLECTION_SEARCH_TIMEOUT = 5
CFG_WEBSEARCH_FIELDS_CONVERT = {}
CFG_WEBSEARCH_FULLTEXT_SNIPPETS = {
'': 4,
}
CFG_WEBSEARCH_FULLTEXT_SNIPPETS_CHARS = {
'': 100,
}
CFG_WEBSEARCH_FULLTEXT_SNIPPETS_GENERATOR = "native"
CFG_WEBSEARCH_I18N_LATEST_ADDITIONS = 0
CFG_WEBSEARCH_INSTANT_BROWSE = 10
CFG_WEBSEARCH_INSTANT_BROWSE_RSS = 25
CFG_WEBSEARCH_LIGHTSEARCH_PATTERN_BOX_WIDTH = 60
CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS = 200
CFG_WEBSEARCH_NARROW_SEARCH_SHOW_GRANDSONS = 1
CFG_WEBSEARCH_NB_RECORDS_TO_SORT = 1000
CFG_WEBSEARCH_PREV_NEXT_HIT_FOR_GUESTS = 1
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT = 1000
CFG_WEBSEARCH_RSS_I18N_COLLECTIONS = []
CFG_WEBSEARCH_RSS_MAX_CACHED_REQUESTS = 1000
CFG_WEBSEARCH_RSS_TTL = 360
CFG_WEBSEARCH_SEARCH_CACHE_SIZE = 1
CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT = 600
CFG_WEBSEARCH_SHOW_COMMENT_COUNT = 1
CFG_WEBSEARCH_SHOW_REVIEW_COUNT = 1
CFG_WEBSEARCH_SIMPLESEARCH_PATTERN_BOX_WIDTH = 40
CFG_WEBSEARCH_SPIRES_SYNTAX = 1
CFG_WEBSEARCH_SPLIT_BY_COLLECTION = 1
CFG_WEBSEARCH_SYNONYM_KBRS = {
'journal': ['SEARCH-SYNONYM-JOURNAL', 'leading_to_number'],
}
CFG_WEBSEARCH_USE_ALEPH_SYSNOS = 0
CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS = []
CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY = "ANY"
CFG_WEBSEARCH_WILDCARD_LIMIT = 50000
CFG_WEBSESSION_ADDRESS_ACTIVATION_EXPIRE_IN_DAYS = 3
CFG_WEBSESSION_EXPIRY_LIMIT_DEFAULT = 2
CFG_WEBSESSION_EXPIRY_LIMIT_REMEMBER = 365
CFG_WEBSESSION_IPADDR_CHECK_SKIP_BITS = 0
CFG_WEBSESSION_NOT_CONFIRMED_EMAIL_ADDRESS_EXPIRE_IN_DAYS = 10
CFG_WEBSESSION_RESET_PASSWORD_EXPIRE_IN_DAYS = 3
CFG_WEBSESSION_STORAGE = "redis"
CFG_WEBSTAT_BIBCIRCULATION_START_YEAR = ""
CFG_WEBSTYLE_CDSPAGEBOXLEFTBOTTOM = ""
CFG_WEBSTYLE_CDSPAGEBOXLEFTTOP = ""
CFG_WEBSTYLE_CDSPAGEBOXRIGHTBOTTOM = ""
CFG_WEBSTYLE_CDSPAGEBOXRIGHTTOP = ""
CFG_WEBSTYLE_EMAIL_ADDRESSES_OBFUSCATION_MODE = 2
CFG_WEBSTYLE_HTTP_STATUS_ALERT_LIST = ['404r', '400', '5*', '41*', ]
CFG_WEBSTYLE_HTTP_USE_COMPRESSION = 0
CFG_WEBSTYLE_REVERSE_PROXY_IPS = []
CFG_WEBSTYLE_TEMPLATE_SKIN = "default"
CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE = 0
CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES = [0, 1, 2]
# END OF GENERATED FILE
| gpl-2.0 |
cokrzys/mindh2jsondh | source/worker.py | 1 | 7160 | """
worker.py
Worker class to manage an export for the mindh2jsondh application.
For more information see: https://github.com/cokrzys/mindh2jsondh
Copyright (C) 2015 cokrzys
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import sys
import psycopg2
import dbhelper as db
import analyte
import jsonfile
import jsondh
import point3d
import mindhcollar
class Worker:
something = 1
con = None
json = None
selection = None
verbose = False
json_file = None
def get_box_sql(self, func):
#------------------------------------------------------------------------------
sql = """
SELECT
{func}(ST_X(ST_Transform(geom, {crs}))),
{func}(ST_Y(ST_Transform(geom, {crs}))),
{func}(ST_Z(ST_Transform(geom, {crs})))
FROM dh.collar WHERE rowid IN ({select})
""".format(func=func, crs=self.json.crs, select=self.selection)
return sql
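# Illustrative note (assumption, demonstration values only): with func='MIN',
# crs=32613 and a selection string of '1,2,3', the template above renders to
# roughly:
#   SELECT MIN(ST_X(ST_Transform(geom, 32613))),
#          MIN(ST_Y(ST_Transform(geom, 32613))),
#          MIN(ST_Z(ST_Transform(geom, 32613)))
#   FROM dh.collar WHERE rowid IN (1,2,3)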
def get_lat_long_sql(self, point):
#------------------------------------------------------------------------------
sql = """
SELECT
ST_X(ST_Transform(ST_SetSRID({pt}, {crs}), 4326)),
ST_Y(ST_Transform(ST_SetSRID({pt}, {crs}), 4326)),
ST_Z(ST_Transform(ST_SetSRID({pt}, {crs}), 4326))
""".format(pt=point, crs=self.json.crs)
return sql
def read_project_box(self):
#------------------------------------------------------------------------------
sql = self.get_box_sql('MIN')
self.json.boxMin = db.get_point3d(self.con, sql)
sql = self.get_box_sql('MAX')
self.json.boxMax = db.get_point3d(self.con, sql)
sql = """
SELECT MAX(max_depth_meters)
FROM dh.collar WHERE rowid IN ({select})
""".format(select=self.selection)
max_depth = db.get_scalar_float(self.con, sql, 0.0)
#
# ----- expand box so it's past the data extents
#
self.json.expand_box(max_depth)
#
# ----- build box min/max in lat/long coordinates
#
sql = self.get_lat_long_sql(self.json.boxMin.get_as_makepoint(self.json.coordinate_decimals))
self.json.latLongMin = db.get_point3d(self.con, sql)
sql = self.get_lat_long_sql(self.json.boxMax.get_as_makepoint(self.json.coordinate_decimals))
self.json.latLongMax = db.get_point3d(self.con, sql)
if self.verbose:
print "Maximum hole depth = %r" % max_depth
def start_main_loop(self):
#------------------------------------------------------------------------------
sql = """
SELECT rowid, name, max_depth_meters,
ST_X(ST_Transform(geom, {crs})),
ST_Y(ST_Transform(geom, {crs})),
ST_Z(ST_Transform(geom, {crs})),
ST_X(geom),
ST_Y(geom),
ST_Z(geom)
FROM dh.collar WHERE rowid IN ({select})
""".format(crs=self.json.crs, select=self.selection)
#
# ----- open cursor and loop through holes to export
#
try:
cur = self.con.cursor()
cur.execute(sql)
while True:
row = cur.fetchone()
if row == None:
break
if self.verbose:
print "Processing hole: %r" % row[1]
col = mindhcollar.Mindhcollar()
col.rowid = int(row[0])
col.name = row[1]
col.depth = row[2]
col.location.x = float(row[3])
col.location.y = float(row[4])
col.location.z = float(row[5])
col.lat_long.x = float(row[6])
col.lat_long.y = float(row[7])
col.lat_long.z = float(row[8])
if self.json.zero_origin:
col.location.subtract(self.json.shift)
col.read_downhole_surveys(self.con)
col.add_dummy_surveys()
col.check_surveys()
col.desurvey_midpoint_split()
col.read_assays(self.con, self.json.analytes_list)
col.desurvey_assays()
self.json.write_hole(self.json_file, col)
except psycopg2.DatabaseError, e:
print 'ERROR: %s' % e
def start_export(self, args, connection, num_holes):
#------------------------------------------------------------------------------
"""
Start the data export.
"""
if args.verbose:
print "Starting export."
self.con = connection
self.selection = args.selection
self.verbose = args.verbose
self.json = jsondh.Jsondh()
self.json.name = args.name
self.json.description = args.description
self.json.crs = args.crs
self.json.coordinate_decimals = args.coordinate_decimals
self.json.zero_origin = args.zero_origin
self.json.desurvey_method = args.desurvey_method
self.json.num_holes_expected = num_holes
self.json.trace_color = args.trace_color
#
# ----- analytes to export
#
self.json.build_analytes_list(args.analytes, args.analyte_descriptions, args.analyte_colors)
if self.verbose: self.json.show_analytes()
#
# ----- create output file and move on
#
self.json_file = jsonfile.Jsonfile()
self.json_file.minify = args.minify
self.json_file.open(args.output_file)
#
# ----- project box and write header
#
self.read_project_box()
self.json.setup_shift()
self.json.write_header(self.json_file)
#
# ----- start the main loop to read and export data
#
self.json.start_holes_section(self.json_file)
self.start_main_loop()
self.json.end_holes_section(self.json_file)
self.json.write_footer(self.json_file)
self.json_file.close()
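# Illustrative usage sketch (assumption; not taken verbatim from the
# mindh2jsondh command line entry point):
#   worker = Worker()
#   worker.start_export(args, connection, num_holes)
# where `args` is the parsed argparse namespace carrying selection, crs, name,
# analytes and output_file, `connection` is an open psycopg2 connection and
# `num_holes` is the number of collars matched by the selection.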
| mit |
ojii/sandlib | lib/lib-python/2.7/plat-mac/lib-scriptpackages/StdSuites/Standard_Suite.py | 73 | 25045 | """Suite Standard Suite: Common terms for most applications
Level 1, version 1
Generated from /Volumes/Sap/System Folder/Extensions/AppleScript
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'core'
from _builtinSuites.builtin_Suite import *
class Standard_Suite_Events(builtin_Suite_Events):
_argmap_class_info = {
'in_' : 'wrcd',
}
def class_info(self, _object=None, _attributes={}, **_arguments):
"""class info: (optional) Get information about an object class
Required argument: the object class about which information is requested
Keyword argument in_: the human language and script system in which to return information
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: a record containing the object\xd5s properties and elements
"""
_code = 'core'
_subcode = 'qobj'
aetools.keysubst(_arguments, self._argmap_class_info)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_close = {
'saving' : 'savo',
'saving_in' : 'kfil',
}
def close(self, _object, _attributes={}, **_arguments):
"""close: Close an object
Required argument: the object to close
Keyword argument saving: specifies whether changes should be saved before closing
Keyword argument saving_in: the file or alias in which to save the object
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'core'
_subcode = 'clos'
aetools.keysubst(_arguments, self._argmap_close)
_arguments['----'] = _object
aetools.enumsubst(_arguments, 'savo', _Enum_savo)
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_count = {
'each' : 'kocl',
}
def count(self, _object, _attributes={}, **_arguments):
"""count: Return the number of elements of an object
Required argument: the object whose elements are to be counted
Keyword argument each: if specified, restricts counting to objects of this class
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: the number of elements
"""
_code = 'core'
_subcode = 'cnte'
aetools.keysubst(_arguments, self._argmap_count)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_data_size = {
'as' : 'rtyp',
}
def data_size(self, _object, _attributes={}, **_arguments):
"""data size: (optional) Return the size in bytes of an object
Required argument: the object whose data size is to be returned
Keyword argument as: the data type for which the size is calculated
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: the size of the object in bytes
"""
_code = 'core'
_subcode = 'dsiz'
aetools.keysubst(_arguments, self._argmap_data_size)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def delete(self, _object, _attributes={}, **_arguments):
"""delete: Delete an object from its container. Note this does not work on script variables, only on elements of application classes.
Required argument: the element to delete
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'core'
_subcode = 'delo'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_duplicate = {
'to' : 'insh',
'with_properties' : 'prdt',
}
def duplicate(self, _object, _attributes={}, **_arguments):
"""duplicate: Duplicate one or more objects
Required argument: the object(s) to duplicate
Keyword argument to: the new location for the object(s)
Keyword argument with_properties: the initial values for properties of the new object that are to be different from the original
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the duplicated object(s)
"""
_code = 'core'
_subcode = 'clon'
aetools.keysubst(_arguments, self._argmap_duplicate)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_event_info = {
'in_' : 'wrcd',
}
def event_info(self, _object, _attributes={}, **_arguments):
"""event info: (optional) Get information about the Apple events in a suite
Required argument: the event class of the Apple events for which to return information
Keyword argument in_: the human language and script system in which to return information
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: a record containing the events and their parameters
"""
_code = 'core'
_subcode = 'gtei'
aetools.keysubst(_arguments, self._argmap_event_info)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def exists(self, _object, _attributes={}, **_arguments):
"""exists: Verify if an object exists
Required argument: the object in question
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: true if it exists, false if not
"""
_code = 'core'
_subcode = 'doex'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def handleBreakpoint(self, _object, _attributes={}, **_arguments):
"""handleBreakpoint: return true to stop at a breakpoint
Required argument: the call frame of the breakpoint
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: true to stop, false if not
"""
_code = 'core'
_subcode = 'brak'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_make = {
'new' : 'kocl',
'at' : 'insh',
'with_data' : 'data',
'with_properties' : 'prdt',
}
def make(self, _no_object=None, _attributes={}, **_arguments):
"""make: Make a new element
Keyword argument new: the class of the new element
Keyword argument at: the location at which to insert the element
Keyword argument with_data: the initial data for the element
Keyword argument with_properties: the initial values for the properties of the element
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the new object(s)
"""
_code = 'core'
_subcode = 'crel'
aetools.keysubst(_arguments, self._argmap_make)
if _no_object is not None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
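# Illustrative sketch (assumption, not generated from the AETE resource): a
# client class that mixes in these events, for example one produced by
# gensuitemodule, could send the make event roughly as
#   app.make(new='docu', with_properties={'pnam': 'untitled'})
# using the four-character class and property codes declared further below.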
_argmap_move = {
'to' : 'insh',
}
def move(self, _object, _attributes={}, **_arguments):
"""move: Move object(s) to a new location
Required argument: the object(s) to move
Keyword argument to: the new location for the object(s)
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the object(s) after they have been moved
"""
_code = 'core'
_subcode = 'move'
aetools.keysubst(_arguments, self._argmap_move)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def open(self, _object, _attributes={}, **_arguments):
"""open: Open the specified object(s)
Required argument: list of objects to open
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'odoc'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def print_(self, _object, _attributes={}, **_arguments):
"""print: Print the specified object(s)
Required argument: list of objects to print
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'pdoc'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_quit = {
'saving' : 'savo',
}
def quit(self, _no_object=None, _attributes={}, **_arguments):
"""quit: Quit an application
Keyword argument saving: specifies whether to save currently open documents
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'quit'
aetools.keysubst(_arguments, self._argmap_quit)
if _no_object is not None: raise TypeError, 'No direct arg expected'
aetools.enumsubst(_arguments, 'savo', _Enum_savo)
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def reopen(self, _no_object=None, _attributes={}, **_arguments):
"""reopen: Reactivate a running application. Some applications will open a new untitled window if no window is open.
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'rapp'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object is not None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def run(self, _no_object=None, _attributes={}, **_arguments):
"""run: Run an application. Most applications will open an empty, untitled window.
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'oapp'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object is not None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_save = {
'in_' : 'kfil',
'as' : 'fltp',
}
def save(self, _object, _attributes={}, **_arguments):
"""save: Save an object
Required argument: the object to save, usually a document or window
Keyword argument in_: the file or alias in which to save the object
Keyword argument as: the file type of the document in which to save the data
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'core'
_subcode = 'save'
aetools.keysubst(_arguments, self._argmap_save)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def select(self, _object, _attributes={}, **_arguments):
"""select: Make a selection
Required argument: the object to select
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'misc'
_subcode = 'slct'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_suite_info = {
'in_' : 'wrcd',
}
def suite_info(self, _object, _attributes={}, **_arguments):
"""suite info: (optional) Get information about event suite(s)
Required argument: the suite for which to return information
Keyword argument in_: the human language and script system in which to return information
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: a record containing the suites and their versions
"""
_code = 'core'
_subcode = 'gtsi'
aetools.keysubst(_arguments, self._argmap_suite_info)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
class alias(aetools.ComponentItem):
"""alias - a file on a disk or server. The file must exist when you check the syntax of your script. """
want = 'alis'
class _Prop_POSIX_path(aetools.NProperty):
"""POSIX path - the POSIX path of the file """
which = 'psxp'
want = 'TEXT'
aliases = alias
class application(aetools.ComponentItem):
"""application - An application program """
want = 'capp'
class _Prop_clipboard(aetools.NProperty):
"""clipboard - the contents of the clipboard for this application """
which = 'pcli'
want = '****'
clipboard = _Prop_clipboard()
class _Prop_frontmost(aetools.NProperty):
"""frontmost - Is this the frontmost application? """
which = 'pisf'
want = 'bool'
frontmost = _Prop_frontmost()
class _Prop_name(aetools.NProperty):
"""name - the name of the application """
which = 'pnam'
want = 'itxt'
name = _Prop_name()
class _Prop_selection(aetools.NProperty):
"""selection - the selection visible to the user. Use the \xd4select\xd5 command to set a new selection; use \xd4contents of selection\xd5 to get or change information in the document. """
which = 'sele'
want = 'csel'
selection = _Prop_selection()
class _Prop_version(aetools.NProperty):
"""version - the version of the application """
which = 'vers'
want = 'vers'
version = _Prop_version()
applications = application
class insertion_points(aetools.ComponentItem):
"""insertion points - """
want = 'cins'
insertion_point = insertion_points
class selection_2d_object(aetools.ComponentItem):
"""selection-object - A way to refer to the state of the current of the selection. Use the \xd4select\xd5 command to make a new selection. """
want = 'csel'
class _Prop_contents(aetools.NProperty):
"""contents - the information currently selected. Use \xd4contents of selection\xd5 to get or change information in a document. """
which = 'pcnt'
want = '****'
class window(aetools.ComponentItem):
"""window - A window """
want = 'cwin'
class _Prop_bounds(aetools.NProperty):
"""bounds - the boundary rectangle for the window """
which = 'pbnd'
want = 'qdrt'
class _Prop_closeable(aetools.NProperty):
"""closeable - Does the window have a close box? """
which = 'hclb'
want = 'bool'
class _Prop_floating(aetools.NProperty):
"""floating - Does the window float? """
which = 'isfl'
want = 'bool'
class _Prop_index(aetools.NProperty):
"""index - the number of the window """
which = 'pidx'
want = 'long'
class _Prop_modal(aetools.NProperty):
"""modal - Is the window modal? """
which = 'pmod'
want = 'bool'
class _Prop_resizable(aetools.NProperty):
"""resizable - Is the window resizable? """
which = 'prsz'
want = 'bool'
class _Prop_titled(aetools.NProperty):
"""titled - Does the window have a title bar? """
which = 'ptit'
want = 'bool'
class _Prop_visible(aetools.NProperty):
"""visible - Is the window visible? """
which = 'pvis'
want = 'bool'
class _Prop_zoomable(aetools.NProperty):
"""zoomable - Is the window zoomable? """
which = 'iszm'
want = 'bool'
class _Prop_zoomed(aetools.NProperty):
"""zoomed - Is the window zoomed? """
which = 'pzum'
want = 'bool'
windows = window
class document(aetools.ComponentItem):
"""document - A document of a scriptable application """
want = 'docu'
class _Prop_modified(aetools.NProperty):
"""modified - Has the document been modified since the last save? """
which = 'imod'
want = 'bool'
documents = document
class file(aetools.ComponentItem):
"""file - a file on a disk or server """
want = 'file'
files = file
alias._superclassnames = []
alias._privpropdict = {
'POSIX_path' : _Prop_POSIX_path,
}
alias._privelemdict = {
}
application._superclassnames = []
application._privpropdict = {
'clipboard' : _Prop_clipboard,
'frontmost' : _Prop_frontmost,
'name' : _Prop_name,
'selection' : _Prop_selection,
'version' : _Prop_version,
}
application._privelemdict = {
}
insertion_points._superclassnames = []
insertion_points._privpropdict = {
}
insertion_points._privelemdict = {
}
selection_2d_object._superclassnames = []
selection_2d_object._privpropdict = {
'contents' : _Prop_contents,
}
selection_2d_object._privelemdict = {
}
window._superclassnames = []
window._privpropdict = {
'bounds' : _Prop_bounds,
'closeable' : _Prop_closeable,
'floating' : _Prop_floating,
'index' : _Prop_index,
'modal' : _Prop_modal,
'resizable' : _Prop_resizable,
'titled' : _Prop_titled,
'visible' : _Prop_visible,
'zoomable' : _Prop_zoomable,
'zoomed' : _Prop_zoomed,
}
window._privelemdict = {
}
document._superclassnames = []
document._privpropdict = {
'modified' : _Prop_modified,
}
document._privelemdict = {
}
file._superclassnames = []
file._privpropdict = {
'POSIX_path' : _Prop_POSIX_path,
}
file._privelemdict = {
}
class _3c_(aetools.NComparison):
"""< - Less than """
class _3d_(aetools.NComparison):
"""= - Equal """
class _3e_(aetools.NComparison):
"""> - Greater than """
class contains(aetools.NComparison):
"""contains - Contains """
class ends_with(aetools.NComparison):
"""ends with - Ends with """
class starts_with(aetools.NComparison):
"""starts with - Starts with """
class _b2_(aetools.NComparison):
"""\xb2 - Less than or equal to """
class _b3_(aetools.NComparison):
"""\xb3 - Greater than or equal to """
_Enum_kfrm = {
'index' : 'indx', # keyform designating indexed access
'named' : 'name', # keyform designating named access
'id' : 'ID ', # keyform designating access by unique identifier
}
_Enum_savo = {
'yes' : 'yes ', # Save objects now
'no' : 'no ', # Do not save objects
'ask' : 'ask ', # Ask the user whether to save
}
_Enum_styl = {
'plain' : 'plan', # Plain
'bold' : 'bold', # Bold
'italic' : 'ital', # Italic
'outline' : 'outl', # Outline
'shadow' : 'shad', # Shadow
'underline' : 'undl', # Underline
'superscript' : 'spsc', # Superscript
'subscript' : 'sbsc', # Subscript
'strikethrough' : 'strk', # Strikethrough
'small_caps' : 'smcp', # Small caps
'all_caps' : 'alcp', # All capital letters
'all_lowercase' : 'lowc', # Lowercase
'condensed' : 'cond', # Condensed
'expanded' : 'pexp', # Expanded
'hidden' : 'hidn', # Hidden
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'alis' : alias,
'capp' : application,
'cins' : insertion_points,
'csel' : selection_2d_object,
'cwin' : window,
'docu' : document,
'file' : file,
}
_propdeclarations = {
'hclb' : _Prop_closeable,
'imod' : _Prop_modified,
'isfl' : _Prop_floating,
'iszm' : _Prop_zoomable,
'pbnd' : _Prop_bounds,
'pcli' : _Prop_clipboard,
'pcnt' : _Prop_contents,
'pidx' : _Prop_index,
'pisf' : _Prop_frontmost,
'pmod' : _Prop_modal,
'pnam' : _Prop_name,
'prsz' : _Prop_resizable,
'psxp' : _Prop_POSIX_path,
'ptit' : _Prop_titled,
'pvis' : _Prop_visible,
'pzum' : _Prop_zoomed,
'sele' : _Prop_selection,
'vers' : _Prop_version,
}
_compdeclarations = {
'< ' : _3c_,
'<= ' : _b2_,
'= ' : _3d_,
'> ' : _3e_,
'>= ' : _b3_,
'bgwt' : starts_with,
'cont' : contains,
'ends' : ends_with,
}
_enumdeclarations = {
'kfrm' : _Enum_kfrm,
'savo' : _Enum_savo,
'styl' : _Enum_styl,
}
| bsd-3-clause |
Zlash65/erpnext | erpnext/education/doctype/course/test_course.py | 9 | 1595 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
from erpnext.education.doctype.topic.test_topic import make_topic
from erpnext.education.doctype.topic.test_topic import make_topic_and_linked_content
import frappe
import unittest
# test_records = frappe.get_test_records('Course')
class TestCourse(unittest.TestCase):
def setUp(self):
make_topic_and_linked_content("_Test Topic 1", [{"type":"Article", "name": "_Test Article 1"}])
make_topic_and_linked_content("_Test Topic 2", [{"type":"Article", "name": "_Test Article 2"}])
make_course_and_linked_topic("_Test Course 1", ["_Test Topic 1", "_Test Topic 2"])
def test_get_topics(self):
course = frappe.get_doc("Course", "_Test Course 1")
topics = course.get_topics()
self.assertEqual(topics[0].name, "_Test Topic 1")
self.assertEqual(topics[1].name, "_Test Topic 2")
frappe.db.rollback()
def make_course(name):
try:
course = frappe.get_doc("Course", name)
except frappe.DoesNotExistError:
course = frappe.get_doc({
"doctype": "Course",
"course_name": name,
"course_code": name
}).insert()
return course.name
def make_course_and_linked_topic(course_name, topic_name_list):
try:
course = frappe.get_doc("Course", course_name)
except frappe.DoesNotExistError:
make_course(course_name)
course = frappe.get_doc("Course", course_name)
topic_list = [make_topic(topic_name) for topic_name in topic_name_list]
for topic in topic_list:
course.append("topics", {"topic": topic})
course.save()
return course
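# Illustrative usage sketch (assumption; the course and topic names below are
# made up for demonstration):
#   course = make_course_and_linked_topic("_Test Course 2",
#                                          ["_Test Topic 1", "_Test Topic 2"])
#   linked = [row.topic for row in course.topics]  # ["_Test Topic 1", "_Test Topic 2"]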
| gpl-3.0 |
stdweird/aquilon | lib/python2.6/aquilon/worker/commands/search_hardware.py | 2 | 1816 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq search hardware`."""
from sqlalchemy.orm import subqueryload, joinedload
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.formats.hardware_entity import SimpleHardwareEntityList
from aquilon.aqdb.model import HardwareEntity
from aquilon.worker.dbwrappers.hardware_entity import (
search_hardware_entity_query)
class CommandSearchHardware(BrokerCommand):
required_parameters = []
def render(self, session, fullinfo, style, **arguments):
if fullinfo or style != "raw":
q = search_hardware_entity_query(session, HardwareEntity, **arguments)
else:
q = search_hardware_entity_query(session, HardwareEntity.label, **arguments)
if fullinfo:
q = q.options(joinedload('location'),
subqueryload('interfaces'),
joinedload('interfaces.assignments'),
joinedload('interfaces.assignments.dns_records'))
return q.all()
else:
return SimpleHardwareEntityList(q.all())
| apache-2.0 |
jboecker/dcs-witchcraft | windows/nodejs/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py | 566 | 9386 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
_TARGET_TYPE_EXT = {
'executable': '.exe',
'loadable_module': '.dll',
'shared_library': '.dll',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
dependencies = copy.copy(new_target_dicts[t].get('dependencies', []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t]['dependencies'] = new_dependencies
return (new_target_list, new_target_dicts)
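# Illustrative note: each target t with 'msvs_shard': N is replaced by shards
# t_0 .. t_(N-1) (via _ShardName), its sources are distributed round-robin among
# the shards, and any dependency on t is rewritten to depend on every shard.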
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s%s.pdb' % (pdb_base, _TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
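# For example (illustrative), a 'shared_library' target named 'foo' with no
# explicit linker or variable settings resolves to '<(PRODUCT_DIR)/foo.dll.pdb'.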
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
# file, polluting GYPs tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts) | gpl-3.0 |
tcstewar/genome_browser | server.py | 1 | 2209 | import swi
import os
import json
import StringIO
import matplotlib
matplotlib.use('Agg')
import pylab
import bigwig
import numpy as np
bigwigs = {}
def get_bigwig(name):
bw = bigwigs.get(name, None)
if bw is None:
bw = bigwig.BigWig(name)
bigwigs[name] = bw
return bw
class Server(swi.SimpleWebInterface):
def swi(self):
return '<ul><li><a href="bigwigs">list of BigWig files</a></li></ul>'
def swi_bigwigs(self):
files = []
for f in os.listdir('.'):
if f.endswith('.bw'):
files.append(f)
items = ['<li><a href="bigwig?name=%s">%s</a></li>' % (f, f)
for f in files]
return '<ul>%s</ul>' % ''.join(items)
def swi_bigwig(self, name):
bw = get_bigwig(name)
parts = []
for c in sorted(bw.sizes.keys()):
params = 'name=%s&c=%s&start=%d&end=%d&count=%d' % (name, c, 0, bw.sizes[c], 100)
part = '<li><a href="query?%s">%s</a> (length=%d) (<a href="img?%s">img</a>)</li>' % (params, c, bw.sizes[c], params)
parts.append(part)
page = '''
<h1>%s</h1>
<h2>Parts</h2>
<ul>%s</ul>
''' % (name, ''.join(parts))
return page
def swi_query(self, name, c, start, end, count):
start = int(start)
end = int(end)
count = int(count)
bw = get_bigwig(name)
q = bw.query(c, start, end, count)
return json.dumps(q)
def swi_img(self, name, c, start, end, count):
start = int(start)
end = int(end)
count = int(count)
bw = get_bigwig(name)
q = bw.query(c, start, end, count)
img = StringIO.StringIO()
pylab.figure()
loc = np.linspace(start, end, count)
mean = [x['mean'] for x in q]
maximum = [x['max'] for x in q]
minimum = [x['min'] for x in q]
pylab.fill_between(loc, minimum, maximum, color='#888888')
pylab.plot(loc, mean, color='k')
pylab.savefig(img, dpi=80, format='png')
return 'image/png', img.getvalue()
if __name__ == '__main__':
port = 8080
swi.browser(port)
swi.start(Server, port=port)
| gpl-2.0 |
HugoKuo/keystone-essex3 | keystone/backends/sqlalchemy/api/tenant.py | 1 | 12120 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone.backends.sqlalchemy import get_session, models, aliased
from keystone.backends import api
from keystone.models import Tenant
# Note: delete() below raises fault.ForbiddenFault; this import was missing here.
# The module path is assumed from the rest of the keystone-essex tree.
from keystone.logic.types import fault
# pylint: disable=E1103,W0221
class TenantAPI(api.BaseTenantAPI):
def __init__(self, *args, **kw):
super(TenantAPI, self).__init__(*args, **kw)
# pylint: disable=W0221
@staticmethod
def transpose(values):
""" Handles transposing field names from Keystone model to
        sqlalchemy model
Differences:
desc <-> description
id <-> uid (coming soon)
"""
if 'id' in values:
values['uid'] = values['id']
del values['id']
if 'description' in values:
values['desc'] = values['description']
del values['description']
if 'enabled' in values:
if values['enabled'] in [1, 'true', 'True', True]:
values['enabled'] = True
else:
values['enabled'] = False
@staticmethod
def to_model(ref):
""" Returns Keystone model object based on SQLAlchemy model"""
if ref:
return Tenant(id=ref.uid, name=ref.name, description=ref.desc,
enabled=bool(ref.enabled))
@staticmethod
def to_model_list(refs):
return [TenantAPI.to_model(ref) for ref in refs]
def create(self, values):
data = values.copy()
TenantAPI.transpose(data)
tenant_ref = models.Tenant()
tenant_ref.update(data)
if tenant_ref.uid is None:
tenant_ref.uid = uuid.uuid4().hex
tenant_ref.save()
return TenantAPI.to_model(tenant_ref)
def get(self, id, session=None):
"""Returns a tenant by ID.
        .. warning::
Internally, the provided ID is matched against the ``tenants.UID``,
not the PK (``tenants.id``) column.
For PK lookups from within the sqlalchemy backend,
use ``_get_by_id()`` instead.
"""
session = session or get_session()
result = session.query(models.Tenant).filter_by(uid=id).first()
return TenantAPI.to_model(result)
@staticmethod
def _get_by_id(id, session=None):
"""Returns a tenant by ID (PK).
        .. warning::
The provided ID is matched against the PK (``tenants.ID``).
This is **only** for use within the sqlalchemy backend.
"""
session = session or get_session()
return session.query(models.Tenant).filter_by(id=id).first()
@staticmethod
def id_to_uid(id, session=None):
session = session or get_session()
tenant = session.query(models.Tenant).filter_by(id=id).first()
return tenant.uid if tenant else None
@staticmethod
def uid_to_id(uid, session=None):
session = session or get_session()
tenant = session.query(models.Tenant).filter_by(uid=uid).first()
return tenant.id if tenant else None
def get_by_name(self, name, session=None):
session = session or get_session()
result = session.query(models.Tenant).filter_by(name=name).first()
return TenantAPI.to_model(result)
def get_all(self, session=None):
if not session:
session = get_session()
results = session.query(models.Tenant).all()
return TenantAPI.to_model_list(results)
def list_for_user_get_page(self, user_id, marker, limit, session=None):
if not session:
session = get_session()
user = api.USER.get(user_id)
if hasattr(api.USER, 'uid_to_id'):
backend_user_id = api.USER.uid_to_id(user_id)
else:
backend_user_id = user_id
ura = aliased(models.UserRoleAssociation)
tenant = aliased(models.Tenant)
q1 = session.query(tenant).join((ura, ura.tenant_id == tenant.id)).\
filter(ura.user_id == backend_user_id)
if 'tenant_id' in user:
if hasattr(api.TENANT, 'uid_to_id'):
backend_tenant_id = api.TENANT.uid_to_id(user.tenant_id)
else:
backend_tenant_id = user.tenant_id
q2 = session.query(tenant).filter(tenant.id == backend_tenant_id)
q3 = q1.union(q2)
else:
q3 = q1
if marker:
results = q3.filter("tenant.id>:marker").params(
marker='%s' % marker).order_by(
tenant.id.desc()).limit(int(limit)).all()
else:
results = q3.order_by(tenant.id.desc()).limit(int(limit)).all()
return TenantAPI.to_model_list(results)
# pylint: disable=R0912
def list_for_user_get_page_markers(self, user_id, marker, limit,
session=None):
if not session:
session = get_session()
user = api.USER.get(user_id)
if hasattr(api.USER, 'uid_to_id'):
backend_user_id = api.USER.uid_to_id(user_id)
else:
backend_user_id = user_id
ura = aliased(models.UserRoleAssociation)
tenant = aliased(models.Tenant)
q1 = session.query(tenant).join((ura, ura.tenant_id == tenant.id)).\
filter(ura.user_id == backend_user_id)
if 'tenant_id' in user:
if hasattr(api.TENANT, 'uid_to_id'):
backend_tenant_id = api.TENANT.uid_to_id(user.tenant_id)
else:
backend_tenant_id = user.tenant_id
q2 = session.query(tenant).filter(tenant.id == backend_tenant_id)
q3 = q1.union(q2)
else:
q3 = q1
first = q3.order_by(tenant.id).first()
last = q3.order_by(tenant.id.desc()).first()
if first is None:
return (None, None)
if marker is None:
marker = first.id
next_page = q3.filter(tenant.id > marker).order_by(
tenant.id).limit(int(limit)).all()
prev_page = q3.filter(tenant.id > marker).order_by(
tenant.id.desc()).limit(int(limit)).all()
if len(next_page) == 0:
next_page = last
else:
for t in next_page:
next_page = t
if len(prev_page) == 0:
prev_page = first
else:
for t in prev_page:
prev_page = t
if prev_page.id == marker:
prev_page = None
else:
prev_page = prev_page.id
if next_page.id == last.id:
next_page = None
else:
next_page = next_page.id
return (prev_page, next_page)
def get_page(self, marker, limit, session=None):
if not session:
session = get_session()
if marker:
tenants = session.query(models.Tenant).\
filter("id>:marker").params(
marker='%s' % marker).order_by(
models.Tenant.id.desc()).limit(int(limit)).all()
else:
tenants = session.query(models.Tenant).order_by(
models.Tenant.id.desc()).limit(int(limit)).all()
return self.to_model_list(tenants)
# pylint: disable=R0912
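    # Computes the (previous, next) pagination markers relative to `marker`:
    # each marker is the tenant id at the boundary of the adjacent page, or
    # None when there is no page in that direction.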
def get_page_markers(self, marker, limit, session=None):
if not session:
session = get_session()
first = session.query(models.Tenant).order_by(
models.Tenant.id).first()
last = session.query(models.Tenant).order_by(
models.Tenant.id.desc()).first()
if first is None:
return (None, None)
if marker is None:
marker = first.id
next_page = session.query(models.Tenant).\
filter("id > :marker").\
params(marker='%s' % marker).\
order_by(models.Tenant.id).\
limit(int(limit)).\
all()
prev_page = session.query(models.Tenant).\
filter("id < :marker").\
params(marker='%s' % marker).\
order_by(models.Tenant.id.desc()).\
limit(int(limit)).\
all()
if len(next_page) == 0:
next_page = last
else:
for t in next_page:
next_page = t
if len(prev_page) == 0:
prev_page = first
else:
for t in prev_page:
prev_page = t
if prev_page.id == marker:
prev_page = None
else:
prev_page = prev_page.id
if next_page.id == last.id:
next_page = None
else:
next_page = next_page.id
return (prev_page, next_page)
def is_empty(self, id, session=None):
if not session:
session = get_session()
if hasattr(api.TENANT, 'uid_to_id'):
id = self.uid_to_id(id)
a_user = session.query(models.UserRoleAssociation).filter_by(
tenant_id=id).first()
if a_user is not None:
return False
a_user = session.query(models.User).filter_by(tenant_id=id).first()
if a_user is not None:
return False
return True
def update(self, id, values, session=None):
if not session:
session = get_session()
if hasattr(api.TENANT, 'uid_to_id'):
pkid = self.uid_to_id(id)
else:
pkid = id
data = values.copy()
TenantAPI.transpose(data)
with session.begin():
tenant_ref = self._get_by_id(pkid, session)
tenant_ref.update(data)
tenant_ref.save(session=session)
return self.get(id, session)
def delete(self, id, session=None):
if not session:
session = get_session()
if not self.is_empty(id):
raise fault.ForbiddenFault("You may not delete a tenant that "
"contains users")
if hasattr(api.TENANT, 'uid_to_id'):
id = self.uid_to_id(id)
with session.begin():
tenant_ref = self._get_by_id(id, session)
session.delete(tenant_ref)
def get_all_endpoints(self, tenant_id, session=None):
if not session:
session = get_session()
if hasattr(api.TENANT, 'uid_to_id'):
tenant_id = self.uid_to_id(tenant_id)
endpoint_templates = aliased(models.EndpointTemplates)
q = session.query(endpoint_templates).\
filter(endpoint_templates.is_global == True)
if tenant_id:
ep = aliased(models.Endpoints)
q1 = session.query(endpoint_templates).join((ep,
ep.endpoint_template_id == endpoint_templates.id)).\
filter(ep.tenant_id == tenant_id)
q = q.union(q1)
return q.all()
def get_role_assignments(self, tenant_id, session=None):
if not session:
session = get_session()
if hasattr(api.TENANT, 'uid_to_id'):
tenant_id = TenantAPI.uid_to_id(tenant_id)
results = session.query(models.UserRoleAssociation).\
filter_by(tenant_id=tenant_id)
for result in results:
if hasattr(api.USER, 'uid_to_id'):
result.user_id = api.USER.id_to_uid(result.user_id)
if hasattr(api.TENANT, 'uid_to_id'):
result.tenant_id = api.TENANT.id_to_uid(result.tenant_id)
return results
def get():
return TenantAPI()
| apache-2.0 |
hydroshare/hydroshare | hs_core/management/commands/solr_prepare.py | 1 | 4767 | """
This calls all preparation routines involved in creating SOLR records.
It is used to debug SOLR harvesting. If any of these routines fails on
any resource, all harvesting ceases. This has caused many bugs.
"""
from django.core.management.base import BaseCommand
from django.db.models import Q
from hs_core.models import BaseResource
from hs_core.search_indexes import BaseResourceIndex
from pprint import pprint
def debug_harvest():
ind = BaseResourceIndex()
for obj in BaseResource.objects.filter(Q(raccess__discoverable=True) |
Q(raccess__public=True)).distinct():
print(("TESTING RESOURCE {}".format(obj.title)))
print('sample_medium')
pprint(ind.prepare_sample_medium(obj))
print('creator')
pprint(ind.prepare_creator(obj))
print('title')
pprint(ind.prepare_title(obj))
print('abstract')
pprint(ind.prepare_abstract(obj))
print('author')
pprint(ind.prepare_author(obj))
print('author_url')
pprint(ind.prepare_author_url(obj))
print('creator')
pprint(ind.prepare_creator(obj))
print('contributor')
pprint(ind.prepare_contributor(obj))
print('subject')
pprint(ind.prepare_subject(obj))
print('organization')
pprint(ind.prepare_organization(obj))
print('publisher')
pprint(ind.prepare_publisher(obj))
print('availability')
pprint(ind.prepare_availability(obj))
print('replaced')
pprint(ind.prepare_replaced(obj))
print('coverage')
pprint(ind.prepare_coverage(obj))
print('coverage_type')
pprint(ind.prepare_coverage_type(obj))
print('east')
pprint(ind.prepare_east(obj))
print('north')
pprint(ind.prepare_north(obj))
print('northlimit')
pprint(ind.prepare_northlimit(obj))
print('eastlimit')
pprint(ind.prepare_eastlimit(obj))
print('southlimit')
pprint(ind.prepare_southlimit(obj))
print('westlimit')
pprint(ind.prepare_westlimit(obj))
print('start_date')
pprint(ind.prepare_start_date(obj))
print('end_date')
pprint(ind.prepare_end_date(obj))
print('format')
pprint(ind.prepare_format(obj))
print('identifier')
pprint(ind.prepare_identifier(obj))
print('language')
pprint(ind.prepare_language(obj))
print('source')
pprint(ind.prepare_source(obj))
print('relation')
pprint(ind.prepare_relation(obj))
print('resource_type')
pprint(ind.prepare_resource_type(obj))
print('comment')
pprint(ind.prepare_comment(obj))
print('owner_login')
pprint(ind.prepare_owner_login(obj))
print('owner')
pprint(ind.prepare_owner(obj))
print('geometry_type')
pprint(ind.prepare_geometry_type(obj))
print('field_name')
pprint(ind.prepare_field_name(obj))
print('field_type')
pprint(ind.prepare_field_type(obj))
print('field_type_code')
pprint(ind.prepare_field_type_code(obj))
print('variable')
pprint(ind.prepare_variable(obj))
print('variable_type')
pprint(ind.prepare_variable_type(obj))
print('variable_shape')
pprint(ind.prepare_variable_shape(obj))
print('variable_descriptive_name')
pprint(ind.prepare_variable_descriptive_name(obj))
print('variable_speciation')
pprint(ind.prepare_variable_speciation(obj))
print('site')
pprint(ind.prepare_site(obj))
print('method')
pprint(ind.prepare_method(obj))
print('quality_level')
pprint(ind.prepare_quality_level(obj))
print('data_source')
pprint(ind.prepare_data_source(obj))
print('sample_medium')
pprint(ind.prepare_sample_medium(obj))
print('units')
pprint(ind.prepare_units(obj))
print('units_type')
pprint(ind.prepare_units_type(obj))
print('absolute_url')
pprint(ind.prepare_absolute_url(obj))
print('extra')
pprint(ind.prepare_extra(obj))
# check whether this resource was found in SOLR right now.
from haystack.query import SearchQuerySet
found_in_solr = SearchQuerySet().filter(short_id=obj.short_id).count() > 0
if found_in_solr:
print("found in solr")
else:
print("NOT FOUND in solr")
class Command(BaseCommand):
help = "Print debugging information about logical files."
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
debug_harvest()
| bsd-3-clause |
ccastell/Transfer-System | Website/env/lib/python3.5/site-packages/django/utils/_os.py | 502 | 3581 | from __future__ import unicode_literals
import os
import sys
import tempfile
from os.path import abspath, dirname, isabs, join, normcase, normpath, sep
from django.core.exceptions import SuspiciousFileOperation
from django.utils import six
from django.utils.encoding import force_text
if six.PY2:
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
# Under Python 2, define our own abspath function that can handle joining
# unicode paths to a current working directory that has non-ASCII characters
# in it. This isn't necessary on Windows since the Windows version of abspath
# handles this correctly. It also handles drive letters differently than the
# pure Python implementation, so it's best not to replace it.
if six.PY3 or os.name == 'nt':
abspathu = abspath
else:
def abspathu(path):
"""
Version of os.path.abspath that uses the unicode representation
of the current working directory, thus avoiding a UnicodeDecodeError
in join when the cwd has non-ASCII characters.
"""
if not isabs(path):
path = join(os.getcwdu(), path)
return normpath(path)
def upath(path):
"""
Always return a unicode path.
"""
if six.PY2 and not isinstance(path, six.text_type):
return path.decode(fs_encoding)
return path
def npath(path):
"""
Always return a native path, that is unicode on Python 3 and bytestring on
Python 2.
"""
if six.PY2 and not isinstance(path, bytes):
return path.encode(fs_encoding)
return path
def safe_join(base, *paths):
"""
Joins one or more path components to the base path component intelligently.
Returns a normalized, absolute version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
"""
base = force_text(base)
paths = [force_text(p) for p in paths]
final_path = abspathu(join(base, *paths))
base_path = abspathu(base)
# Ensure final_path starts with base_path (using normcase to ensure we
# don't false-negative on case insensitive operating systems like Windows),
# further, one of the following conditions must be true:
# a) The next character is the path separator (to prevent conditions like
# safe_join("/dir", "/../d"))
# b) The final path must be the same as the base path.
# c) The base path must be the most root path (meaning either "/" or "C:\\")
if (not normcase(final_path).startswith(normcase(base_path + sep)) and
normcase(final_path) != normcase(base_path) and
dirname(normcase(base_path)) != normcase(base_path)):
raise SuspiciousFileOperation(
'The joined path ({}) is located outside of the base path '
'component ({})'.format(final_path, base_path))
return final_path
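# Illustrative behaviour (assuming POSIX-style paths):
#   safe_join('/var/www', 'media', 'img.png') -> '/var/www/media/img.png'
#   safe_join('/var/www', '../etc/passwd') raises SuspiciousFileOperation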
def symlinks_supported():
"""
A function to check if creating symlinks are supported in the
host platform and/or if they are allowed to be created (e.g.
on Windows it requires admin permissions).
"""
tmpdir = tempfile.mkdtemp()
original_path = os.path.join(tmpdir, 'original')
symlink_path = os.path.join(tmpdir, 'symlink')
os.makedirs(original_path)
try:
os.symlink(original_path, symlink_path)
supported = True
except (OSError, NotImplementedError, AttributeError):
supported = False
else:
os.remove(symlink_path)
finally:
os.rmdir(original_path)
os.rmdir(tmpdir)
return supported
| apache-2.0 |
biswajitsahu/kuma | vendor/packages/pygments/lexers/dalvik.py | 72 | 4420 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dalvik
~~~~~~~~~~~~~~~~~~~~~~
Pygments lexers for Dalvik VM-related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Keyword, Text, Comment, Name, String, Number, \
Punctuation
__all__ = ['SmaliLexer']
class SmaliLexer(RegexLexer):
"""
For `Smali <http://code.google.com/p/smali/>`_ (Android/Dalvik) assembly
code.
.. versionadded:: 1.6
"""
name = 'Smali'
aliases = ['smali']
filenames = ['*.smali']
mimetypes = ['text/smali']
tokens = {
'root': [
include('comment'),
include('label'),
include('field'),
include('method'),
include('class'),
include('directive'),
include('access-modifier'),
include('instruction'),
include('literal'),
include('punctuation'),
include('type'),
include('whitespace')
],
'directive': [
(r'^[ \t]*\.(class|super|implements|field|subannotation|annotation|'
r'enum|method|registers|locals|array-data|packed-switch|'
r'sparse-switch|catchall|catch|line|parameter|local|prologue|'
r'epilogue|source)', Keyword),
(r'^[ \t]*\.end (field|subannotation|annotation|method|array-data|'
'packed-switch|sparse-switch|parameter|local)', Keyword),
(r'^[ \t]*\.restart local', Keyword),
],
'access-modifier': [
(r'(public|private|protected|static|final|synchronized|bridge|'
r'varargs|native|abstract|strictfp|synthetic|constructor|'
r'declared-synchronized|interface|enum|annotation|volatile|'
r'transient)', Keyword),
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
],
'instruction': [
(r'\b[vp]\d+\b', Name.Builtin), # registers
(r'\b[a-z][A-Za-z0-9/-]+\s+', Text), # instructions
],
'literal': [
(r'".*"', String),
(r'0x[0-9A-Fa-f]+t?', Number.Hex),
(r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+L?', Number.Integer),
],
'field': [
(r'(\$?\b)([\w$]*)(:)',
bygroups(Punctuation, Name.Variable, Punctuation)),
],
'method': [
(r'<(?:cl)?init>', Name.Function), # constructor
(r'(\$?\b)([\w$]*)(\()',
bygroups(Punctuation, Name.Function, Punctuation)),
],
'label': [
(r':\w+', Name.Label),
],
'class': [
# class names in the form Lcom/namespace/ClassName;
# I only want to color the ClassName part, so the namespace part is
# treated as 'Text'
(r'(L)((?:[\w$]+/)*)([\w$]+)(;)',
bygroups(Keyword.Type, Text, Name.Class, Text)),
],
'punctuation': [
(r'->', Punctuation),
(r'[{},():=.-]', Punctuation),
],
'type': [
(r'[ZBSCIJFDV\[]+', Keyword.Type),
],
'comment': [
(r'#.*?\n', Comment),
],
}
def analyse_text(text):
score = 0
if re.search(r'^\s*\.class\s', text, re.MULTILINE):
score += 0.5
if re.search(r'\b((check-cast|instance-of|throw-verification-error'
r')\b|(-to|add|[ais]get|[ais]put|and|cmpl|const|div|'
r'if|invoke|move|mul|neg|not|or|rem|return|rsub|shl|'
r'shr|sub|ushr)[-/])|{|}', text, re.MULTILINE):
score += 0.3
if re.search(r'(\.(catchall|epilogue|restart local|prologue)|'
r'\b(array-data|class-change-error|declared-synchronized|'
r'(field|inline|vtable)@0x[0-9a-fA-F]|generic-error|'
r'illegal-class-access|illegal-field-access|'
r'illegal-method-access|instantiation-error|no-error|'
r'no-such-class|no-such-field|no-such-method|'
r'packed-switch|sparse-switch))\b', text, re.MULTILINE):
score += 0.6
return score
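# Illustrative usage (assumption -- not part of this file): the lexer is normally
# driven through pygments.highlight(code, SmaliLexer(), some_formatter) to
# tokenize Smali assembly source using the rules defined above.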
| mpl-2.0 |
kevinarpe/kevinarpe-rambutan3 | rambutan3/check_args/base/RStrictInstanceMatcher.py | 1 | 1930 | from rambutan3.check_args.base.RAbstractForwardingTypeMatcher import RAbstractForwardingTypeMatcher
from rambutan3.check_args.base.RAbstractTypeMatcher import RAbstractTypeMatcher
from rambutan3.check_args.base.RInstanceMatcher import RInstanceMatcher
from rambutan3.check_args.base.traverse.RTypeMatcherError import RTypeMatcherError
class RStrictInstanceMatcher(RAbstractForwardingTypeMatcher):
"""Strict type instance matcher -- subclasses do not match
Example: Instance of Y will match type Y, but not type X.
class X:
pass
class Y(X):
pass
This class is fully tested.
@author Kevin Connor ARPE ([email protected])
@see builtins#type()
"""
# noinspection PyMissingConstructor
def __init__(self, *class_or_type_tuple):
"""
@param *class_or_type_tuple
one or more value type classes, e.g., {@link str} or {@link float}
@throws ValueError
if {@code *class_or_type_tuple} is empty
@throws TypeError
if {@code *class_or_type_tuple} contains a item that is not a type/class
"""
# Intentional: Do not call super(RAbstractForwardingTypeMatcher, self).__init__()
self.__matcher = RInstanceMatcher(*class_or_type_tuple)
self.__class_or_type_tuple = class_or_type_tuple
# @overrides
@property
def _delegate(self) -> RAbstractTypeMatcher:
return self.__matcher
# @overrides
def matches(self, value, matcher_error: RTypeMatcherError=None) -> bool:
value_type = type(value)
# Intentional override: Do not defer to delegate / RInstanceMatcher here.
# Notice we use 'is' operator here and not isinstance().
x = any(value_type is class_or_type for class_or_type in self.__class_or_type_tuple)
if not x and matcher_error:
matcher_error.add_failed_match(self, value)
return x
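# Illustrative example (mirrors the class docstring): given class Y(X),
# RStrictInstanceMatcher(X).matches(Y()) is False because type(Y()) is not X,
# whereas an isinstance-based matcher such as RInstanceMatcher(X) would accept
# the subclass instance.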
| gpl-3.0 |
dcifuen/cloudbday | src/lib/requests/packages/charade/jpcntx.py | 949 | 19104 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import wrap_ord
NUM_OF_CATEGORY = 6
DONT_KNOW = -1
ENOUGH_REL_THRESHOLD = 100
MAX_REL_THRESHOLD = 1000
MINIMUM_DATA_THRESHOLD = 4
# This is the hiragana 2-char sequence table; the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis:
def __init__(self):
self.reset()
def reset(self):
self._mTotalRel = 0 # total sequence received
        # category counters; each integer counts sequences in its category
self._mRelSample = [0] * NUM_OF_CATEGORY
# if last byte in current buffer is not the last byte of a character,
# we need to know how many bytes to skip in next buffer
self._mNeedToSkipCharNum = 0
self._mLastCharOrder = -1 # The order of previous char
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
def feed(self, aBuf, aLen):
if self._mDone:
return
        # The buffer we got is byte oriented, and a character may span more than
        # one buffer. If the last one or two bytes of the previous buffer were
        # not a complete character, we record how many bytes are needed to
        # complete it and skip those bytes here. We could instead record those
        # bytes and analyse the character once it is complete, but since a
        # single character will not make much difference, simply skipping it
        # simplifies our logic and improves performance.
i = self._mNeedToSkipCharNum
while i < aLen:
order, charLen = self.get_order(aBuf[i:i + 2])
i += charLen
if i > aLen:
self._mNeedToSkipCharNum = i - aLen
self._mLastCharOrder = -1
else:
if (order != -1) and (self._mLastCharOrder != -1):
self._mTotalRel += 1
if self._mTotalRel > MAX_REL_THRESHOLD:
self._mDone = True
break
self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
self._mLastCharOrder = order
def got_enough_data(self):
return self._mTotalRel > ENOUGH_REL_THRESHOLD
def get_confidence(self):
# This is just one way to calculate confidence. It works well for me.
if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
else:
return DONT_KNOW
def get_order(self, aBuf):
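        # Base-class stub: subclasses override this to return
        # (hiragana order, byte length of the current character).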
return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
def get_order(self, aBuf):
if not aBuf:
return -1, 1
# find out current char's byte length
first_char = wrap_ord(aBuf[0])
if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):
charLen = 2
else:
charLen = 1
# return its order if it is hiragana
if len(aBuf) > 1:
second_char = wrap_ord(aBuf[1])
if (first_char == 202) and (0x9F <= second_char <= 0xF1):
return second_char - 0x9F, charLen
return -1, charLen
class EUCJPContextAnalysis(JapaneseContextAnalysis):
def get_order(self, aBuf):
if not aBuf:
return -1, 1
# find out current char's byte length
first_char = wrap_ord(aBuf[0])
if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE):
charLen = 2
elif first_char == 0x8F:
charLen = 3
else:
charLen = 1
# return its order if it is hiragana
if len(aBuf) > 1:
second_char = wrap_ord(aBuf[1])
if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3):
return second_char - 0xA1, charLen
return -1, charLen
# flake8: noqa
| mit |
brenolf/k-flow | thm.py | 1 | 3692 | import sys
N = -1
G = None
H = None
vis = None
vis_aux = None
valence = None
flows = {}
answer = []
allowed_flows = {
3 : [-1, 1],
4 : [-1, 1, 2],
5 : [-1, 1, 2, -2]
}
def has_k_flow (graph):
global N, G, H, vis, valence, flows
G = graph
N = len(G)
H = [[0] * N for i in xrange(0, N)]
vis = [False] * N
valence = [0] * N
for v in xrange(0, N):
valence[v] = len(G[v])
if valence[v] not in flows and valence[v] != 0:
flows[valence[v]] = getWeights(valence[v])
for v in xrange(0, N):
G[v] = sorted(G[v], key=lambda u : valence[u], reverse=True)
del answer[:]
v = find_next()
return dfs(v)
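# getWeights enumerates the candidate weight vectors for a vertex of the given
# valence (one entry of 0 or 2 per incident edge), keeping only those that pass
# the parity check below. Descriptive comment; the flow-theoretic meaning is
# inferred from the surrounding code.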
def getWeights (VALENCE, e = 0):
global answer, E, edges
if e == 0:
del answer[:]
edges = [0] * VALENCE
elif e >= VALENCE:
return None
isLast = (e == (VALENCE - 1))
weight2 = [0, 2]
for w in xrange(0, 2):
edges[e] = weight2[w]
getWeights(VALENCE, e + 1)
if isLast:
edges2 = sum(edges) / 2
if (VALENCE - edges2) % 2 == 0 and not (edges2 == VALENCE and edges2 % 2 != 0):
answer.append(edges[:])
if e == 0:
return answer[:]
def find_next ():
vertices = xrange(0, N)
vertices = filter(lambda v : not vis[v], vertices)
# pick most constrained variable
vertices = sorted(vertices, key=lambda v : valence[v], reverse=True)
return vertices.pop(0)
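# dfs tries to assign a consistent weight vector to the edges incident to v
# (recorded symmetrically in H), recurses on unvisited neighbours and backtracks
# on failure; once every vertex is visited it runs checkEulerian and records H
# as an answer. (Descriptive comment added for readability.)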
def dfs (v = 0):
vis[v] = True
if valence[v] == 0:
sys.stderr.write ('error: vertex "%d" is 0-valent. Have you forgotten it?\n' % v)
exit(1)
constraints, neighbours = getConstraints(v)
weights = flows[valence[v]]
W = select(constraints, weights, v)
isLast = (sum(vis) == N)
if len(W) == 0:
vis[v] = False
return False
for w in W:
clear(v, neighbours)
assign(v, w)
counter = 0
for u in G[v]:
if not vis[u]:
counter += 1
if dfs(u):
return True
else:
break
deadlock = (not isLast and counter == 0)
if deadlock and dfs(find_next()):
return True
elif isLast and checkEulerian():
answer.append(H[:][:])
return True
vis[v] = False
clear(v, neighbours)
return False
def dfs_check(v, one_vertex, component, path):
global vis_aux
vis_aux[v] = component
path.append(v)
recursive_ones = 0
for u in G[v]:
if vis_aux[u] == 0 and H[v][u] == 0:
recursive_ones += dfs_check(u, one_vertex, component, path)
return int(one_vertex[v]) + recursive_ones
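# checkEulerian verifies that, within each component connected by zero-weight
# edges, the number of vertices with an odd count of weight-2 edges is even.
# (Descriptive comment inferred from the code below.)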
def checkEulerian():
global vis_aux
# for v in xrange(0, N):
# weight2 = sum(H[v]) / 2
# if (valence[v] - weight2) % 2 != 0:
# return False
vis_aux = [False] * N
one_vertex = [(sum(H[v]) / 2) % 2 != 0 for v in xrange(0, N)]
components = 0
result = True
paths = {}
for v in xrange(0, N):
if vis_aux[v] == 0:
components += 1
path = []
C_ones = dfs_check(v, one_vertex, components, path)
paths[components] = path
if C_ones % 2 != 0:
result = False
if result and False:
for i in xrange(0, components):
print i + 1, paths[i + 1]
return result
def getConstraints (v):
constraints = {}
neighbours = []
i = 0
for u in G[v]:
if H[v][u] != 0 or H[u][v] != 0:
constraints[i] = 2
neighbours.append(u)
i += 1
return constraints, neighbours
def select (constraints, possibilities, v):
r = []
for p in possibilities:
for field in constraints:
if p[field] != constraints[field]:
break
else:
r.append(p[:])
def valid (vector):
for i in xrange(0, len(vector)):
if vis[G[v][i]] and vector[i] == 2 and i not in constraints:
return False
return True
return [i for i in r if valid(i)]
def assign (v, weights):
for u in G[v]:
w = weights.pop(0)
H[u][v] = H[v][u] = w
def clear (v, neighbours):
for u in G[v]:
if u not in neighbours:
H[u][v] = H[v][u] = 0 | apache-2.0 |
rubenfonseca/titanium-dropboxsync | build.py | 1 | 6556 | #!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','com.0x82.dropboxsync.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
path = os.path.basename(js_file)
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
method = compiler.compile_commonjs_file(path,js_file)
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
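	# Splice the generated lookup method into the module's Objective-C assets
	# file so the compiled JavaScript is embedded in the binary.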
method += '\treturn filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);'
f = os.path.join(cwd,'Classes','Com0x82DropboxsyncModuleAssets.m')
c = open(f).read()
templ_search = ' moduleAsset\n{\n'
idx = c.find(templ_search) + len(templ_search)
before = c[0:idx]
after = """
}
@end
"""
newc = before + method + after
if newc!=c:
x = open(f,'w')
x.write(newc)
x.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
f = open(path)
if not os.path.exists(path): die("missing %s" % path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README','com.0x82.dropboxsync.js']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[]):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e)==2 and e[1]=='.pyc':continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
os.system("rm -rf build/")
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
for dn in ('assets','example','platform'):
if os.path.exists(dn):
zip_dir(zf,dn,'%s/%s' % (modulepath,dn),['README'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
| mit |
HoracioAlvarado/fwd | venv/Lib/site-packages/pip/utils/deprecation.py | 23 | 2282 | """
A module that implements tooling to enable easy warnings about deprecations.
"""
from __future__ import absolute_import
import logging
import warnings
class PipDeprecationWarning(Warning):
pass
class RemovedInPip9Warning(PipDeprecationWarning, DeprecationWarning):
pass
class RemovedInPip10Warning(PipDeprecationWarning, PendingDeprecationWarning):
pass
class Python26DeprecationWarning(
PipDeprecationWarning, PendingDeprecationWarning
):
pass
DEPRECATIONS = [
RemovedInPip9Warning, RemovedInPip10Warning, Python26DeprecationWarning
]
# Warnings <-> Logging Integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(
message, category, filename, lineno, file, line,
)
else:
if issubclass(category, PipDeprecationWarning):
# We use a specially named logger which will handle all of the
# deprecation messages for pip.
logger = logging.getLogger("pip.deprecations")
# This is purposely using the % formatter here instead of letting
# the logging module handle the interpolation. This is because we
# want it to appear as if someone typed this entire message out.
log_message = "DEPRECATION: %s" % message
# Things that are DeprecationWarnings will be removed in the very
# next version of pip. We want these to be more obvious so we
# use the ERROR logging level while the PendingDeprecationWarnings
            # still have at least 2 versions to go until they are removed,
            # so they can just be warnings.
if issubclass(category, DeprecationWarning):
logger.error(log_message)
else:
logger.warning(log_message)
else:
_warnings_showwarning(
message, category, filename, lineno, file, line,
)
def install_warning_logger():
global _warnings_showwarning
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
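# A minimal sketch of how the pieces above fit together (the logging setup is
# illustrative, not part of pip): once install_warning_logger() swaps in
# _showwarning, any PipDeprecationWarning raised through the warnings module
# is routed to the "pip.deprecations" logger instead of being printed.
def _example_emit_deprecation():
    logging.basicConfig(level=logging.DEBUG)
    warnings.simplefilter("always", PipDeprecationWarning)
    install_warning_logger()
    warnings.warn(
        "this option will be removed in a future release",
        RemovedInPip10Warning,
    )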
| mit |
sestrella/ansible | lib/ansible/modules/network/fortios/fortios_system_replacemsg_admin.py | 13 | 10079 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_replacemsg_admin
short_description: Replacement messages in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system_replacemsg feature and admin category.
      Examples include all parameters; values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_replacemsg_admin:
description:
- Replacement messages.
default: null
type: dict
suboptions:
buffer:
description:
- Message string.
type: str
format:
description:
- Format flag.
type: str
choices:
- none
- text
- html
- wml
header:
description:
- Header flag.
type: str
choices:
- none
- http
- 8bit
msg_type:
description:
- Message type.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Replacement messages.
fortios_system_replacemsg_admin:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_replacemsg_admin:
buffer: "<your_own_value>"
format: "none"
header: "none"
msg_type: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_replacemsg_admin_data(json):
option_list = ['buffer', 'format', 'header',
'msg_type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
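def _demo_underscore_to_hyphen():
    # Illustrative only (not called by Ansible): FortiOS expects hyphenated
    # attribute names, so nested keys such as 'msg_type' are rewritten to
    # 'msg-type' while the values are left untouched.  The sample data below
    # is hypothetical.
    sample = {'msg_type': 'admin', 'format': 'text'}
    assert underscore_to_hyphen(sample) == {'msg-type': 'admin', 'format': 'text'}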
def system_replacemsg_admin(data, fos):
vdom = data['vdom']
state = data['state']
system_replacemsg_admin_data = data['system_replacemsg_admin']
filtered_data = underscore_to_hyphen(filter_system_replacemsg_admin_data(system_replacemsg_admin_data))
if state == "present":
return fos.set('system.replacemsg',
'admin',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system.replacemsg',
'admin',
mkey=filtered_data['msg-type'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system_replacemsg(data, fos):
if data['system_replacemsg_admin']:
resp = system_replacemsg_admin(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_replacemsg_admin": {
"required": False, "type": "dict", "default": None,
"options": {
"buffer": {"required": False, "type": "str"},
"format": {"required": False, "type": "str",
"choices": ["none", "text", "html",
"wml"]},
"header": {"required": False, "type": "str",
"choices": ["none", "http", "8bit"]},
"msg_type": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system_replacemsg(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system_replacemsg(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
aliyun/oss-ftp | python27/win32/Lib/email/generator.py | 64 | 14203 | # Copyright (C) 2001-2010 Python Software Foundation
# Contact: [email protected]
"""Classes to generate plain text from a message object tree."""
__all__ = ['Generator', 'DecodedGenerator']
import re
import sys
import time
import random
import warnings
from cStringIO import StringIO
from email.header import Header
UNDERSCORE = '_'
NL = '\n'
fcre = re.compile(r'^From ', re.MULTILINE)
def _is8bitstring(s):
if isinstance(s, str):
try:
unicode(s, 'us-ascii')
except UnicodeError:
return True
return False
class Generator:
"""Generates output from a Message object tree.
This basic generator writes the message to the given file object as plain
text.
"""
#
# Public interface
#
def __init__(self, outfp, mangle_from_=True, maxheaderlen=78):
"""Create the generator for message flattening.
outfp is the output file-like object for writing the message to. It
must have a write() method.
Optional mangle_from_ is a flag that, when True (the default), escapes
From_ lines in the body of the message by putting a `>' in front of
them.
Optional maxheaderlen specifies the longest length for a non-continued
header. When a header line is longer (in characters, with tabs
expanded to 8 spaces) than maxheaderlen, the header will split as
defined in the Header class. Set maxheaderlen to zero to disable
header wrapping. The default is 78, as recommended (but not required)
by RFC 2822.
"""
self._fp = outfp
self._mangle_from_ = mangle_from_
self._maxheaderlen = maxheaderlen
def write(self, s):
# Just delegate to the file object
self._fp.write(s)
def flatten(self, msg, unixfrom=False):
"""Print the message object tree rooted at msg to the output file
specified when the Generator instance was created.
unixfrom is a flag that forces the printing of a Unix From_ delimiter
before the first object in the message tree. If the original message
has no From_ delimiter, a `standard' one is crafted. By default, this
is False to inhibit the printing of any From_ delimiter.
Note that for subobjects, no From_ line is printed.
"""
if unixfrom:
ufrom = msg.get_unixfrom()
if not ufrom:
ufrom = 'From nobody ' + time.ctime(time.time())
print >> self._fp, ufrom
self._write(msg)
def clone(self, fp):
"""Clone this generator with the exact same options."""
return self.__class__(fp, self._mangle_from_, self._maxheaderlen)
#
# Protected interface - undocumented ;/
#
def _write(self, msg):
# We can't write the headers yet because of the following scenario:
# say a multipart message includes the boundary string somewhere in
# its body. We'd have to calculate the new boundary /before/ we write
# the headers so that we can write the correct Content-Type:
# parameter.
#
# The way we do this, so as to make the _handle_*() methods simpler,
        # is to cache any subpart writes into a StringIO.  Then we write the
# headers and the StringIO contents. That way, subpart handlers can
# Do The Right Thing, and can still modify the Content-Type: header if
# necessary.
oldfp = self._fp
try:
self._fp = sfp = StringIO()
self._dispatch(msg)
finally:
self._fp = oldfp
# Write the headers. First we see if the message object wants to
# handle that itself. If not, we'll do it generically.
meth = getattr(msg, '_write_headers', None)
if meth is None:
self._write_headers(msg)
else:
meth(self)
self._fp.write(sfp.getvalue())
def _dispatch(self, msg):
# Get the Content-Type: for the message, then try to dispatch to
# self._handle_<maintype>_<subtype>(). If there's no handler for the
# full MIME type, then dispatch to self._handle_<maintype>(). If
# that's missing too, then dispatch to self._writeBody().
main = msg.get_content_maintype()
sub = msg.get_content_subtype()
specific = UNDERSCORE.join((main, sub)).replace('-', '_')
meth = getattr(self, '_handle_' + specific, None)
if meth is None:
generic = main.replace('-', '_')
meth = getattr(self, '_handle_' + generic, None)
if meth is None:
meth = self._writeBody
meth(msg)
#
# Default handlers
#
def _write_headers(self, msg):
for h, v in msg.items():
print >> self._fp, '%s:' % h,
if self._maxheaderlen == 0:
# Explicit no-wrapping
print >> self._fp, v
elif isinstance(v, Header):
# Header instances know what to do
print >> self._fp, v.encode()
elif _is8bitstring(v):
# If we have raw 8bit data in a byte string, we have no idea
# what the encoding is. There is no safe way to split this
# string. If it's ascii-subset, then we could do a normal
# ascii split, but if it's multibyte then we could break the
# string. There's no way to know so the least harm seems to
# be to not split the string and risk it being too long.
print >> self._fp, v
else:
# Header's got lots of smarts, so use it. Note that this is
# fundamentally broken though because we lose idempotency when
# the header string is continued with tabs. It will now be
                # continued with spaces.  This was broken in the opposite
                # direction before we fixed bug 1974.  Either way, we lose.
print >> self._fp, Header(
v, maxlinelen=self._maxheaderlen, header_name=h).encode()
# A blank line always separates headers from body
print >> self._fp
#
# Handlers for writing types and subtypes
#
def _handle_text(self, msg):
payload = msg.get_payload()
if payload is None:
return
if not isinstance(payload, basestring):
raise TypeError('string payload expected: %s' % type(payload))
if self._mangle_from_:
payload = fcre.sub('>From ', payload)
self._fp.write(payload)
# Default body handler
_writeBody = _handle_text
def _handle_multipart(self, msg):
# The trick here is to write out each part separately, merge them all
# together, and then make sure that the boundary we've chosen isn't
# present in the payload.
msgtexts = []
subparts = msg.get_payload()
if subparts is None:
subparts = []
elif isinstance(subparts, basestring):
# e.g. a non-strict parse of a message with no starting boundary.
self._fp.write(subparts)
return
elif not isinstance(subparts, list):
# Scalar payload
subparts = [subparts]
for part in subparts:
s = StringIO()
g = self.clone(s)
g.flatten(part, unixfrom=False)
msgtexts.append(s.getvalue())
# BAW: What about boundaries that are wrapped in double-quotes?
boundary = msg.get_boundary()
if not boundary:
# Create a boundary that doesn't appear in any of the
# message texts.
alltext = NL.join(msgtexts)
boundary = _make_boundary(alltext)
msg.set_boundary(boundary)
# If there's a preamble, write it out, with a trailing CRLF
if msg.preamble is not None:
if self._mangle_from_:
preamble = fcre.sub('>From ', msg.preamble)
else:
preamble = msg.preamble
print >> self._fp, preamble
# dash-boundary transport-padding CRLF
print >> self._fp, '--' + boundary
# body-part
if msgtexts:
self._fp.write(msgtexts.pop(0))
# *encapsulation
# --> delimiter transport-padding
# --> CRLF body-part
for body_part in msgtexts:
# delimiter transport-padding CRLF
print >> self._fp, '\n--' + boundary
# body-part
self._fp.write(body_part)
# close-delimiter transport-padding
self._fp.write('\n--' + boundary + '--' + NL)
if msg.epilogue is not None:
if self._mangle_from_:
epilogue = fcre.sub('>From ', msg.epilogue)
else:
epilogue = msg.epilogue
self._fp.write(epilogue)
def _handle_multipart_signed(self, msg):
# The contents of signed parts has to stay unmodified in order to keep
# the signature intact per RFC1847 2.1, so we disable header wrapping.
# RDM: This isn't enough to completely preserve the part, but it helps.
old_maxheaderlen = self._maxheaderlen
try:
self._maxheaderlen = 0
self._handle_multipart(msg)
finally:
self._maxheaderlen = old_maxheaderlen
def _handle_message_delivery_status(self, msg):
# We can't just write the headers directly to self's file object
# because this will leave an extra newline between the last header
# block and the boundary. Sigh.
blocks = []
for part in msg.get_payload():
s = StringIO()
g = self.clone(s)
g.flatten(part, unixfrom=False)
text = s.getvalue()
lines = text.split('\n')
# Strip off the unnecessary trailing empty line
if lines and lines[-1] == '':
blocks.append(NL.join(lines[:-1]))
else:
blocks.append(text)
# Now join all the blocks with an empty line. This has the lovely
# effect of separating each block with an empty line, but not adding
# an extra one after the last one.
self._fp.write(NL.join(blocks))
def _handle_message(self, msg):
s = StringIO()
g = self.clone(s)
# The payload of a message/rfc822 part should be a multipart sequence
# of length 1. The zeroth element of the list should be the Message
# object for the subpart. Extract that object, stringify it, and
# write it out.
# Except, it turns out, when it's a string instead, which happens when
# and only when HeaderParser is used on a message of mime type
# message/rfc822. Such messages are generated by, for example,
# Groupwise when forwarding unadorned messages. (Issue 7970.) So
# in that case we just emit the string body.
payload = msg.get_payload()
if isinstance(payload, list):
g.flatten(msg.get_payload(0), unixfrom=False)
payload = s.getvalue()
self._fp.write(payload)
_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]'
class DecodedGenerator(Generator):
"""Generates a text representation of a message.
Like the Generator base class, except that non-text parts are substituted
with a format string representing the part.
"""
def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
"""Like Generator.__init__() except that an additional optional
argument is allowed.
Walks through all subparts of a message. If the subpart is of main
type `text', then it prints the decoded payload of the subpart.
Otherwise, fmt is a format string that is used instead of the message
payload. fmt is expanded with the following keywords (in
%(keyword)s format):
type : Full MIME type of the non-text part
maintype : Main MIME type of the non-text part
subtype : Sub-MIME type of the non-text part
filename : Filename of the non-text part
description: Description associated with the non-text part
encoding : Content transfer encoding of the non-text part
The default value for fmt is None, meaning
[Non-text (%(type)s) part of message omitted, filename %(filename)s]
"""
Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
if fmt is None:
self._fmt = _FMT
else:
self._fmt = fmt
def _dispatch(self, msg):
for part in msg.walk():
maintype = part.get_content_maintype()
if maintype == 'text':
print >> self, part.get_payload(decode=True)
elif maintype == 'multipart':
# Just skip this
pass
else:
print >> self, self._fmt % {
'type' : part.get_content_type(),
'maintype' : part.get_content_maintype(),
'subtype' : part.get_content_subtype(),
'filename' : part.get_filename('[no filename]'),
'description': part.get('Content-Description',
'[no description]'),
'encoding' : part.get('Content-Transfer-Encoding',
'[no encoding]'),
}
# Helper
_width = len(repr(sys.maxint-1))
_fmt = '%%0%dd' % _width
def _make_boundary(text=None):
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
token = random.randrange(sys.maxint)
boundary = ('=' * 15) + (_fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
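# A minimal usage sketch (illustrative, not part of the module): flattening a
# simple message through Generator mirrors what msg.as_string() does
# internally.  The message content below is hypothetical.
def _example_flatten():
    from email.mime.text import MIMEText
    msg = MIMEText("hello world")
    msg['Subject'] = 'demo'
    fp = StringIO()
    g = Generator(fp, mangle_from_=True, maxheaderlen=60)
    g.flatten(msg)
    return fp.getvalue()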
| mit |
Big-B702/python-for-android | python-modules/twisted/twisted/internet/unix.py | 49 | 11359 | # -*- test-case-name: twisted.test.test_unix,twisted.internet.test.test_unix,twisted.internet.test.test_posixbase -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Various asynchronous TCP/IP classes.
End users shouldn't use this module directly - use the reactor APIs instead.
Maintainer: Itamar Shtull-Trauring
"""
# System imports
import os, sys, stat, socket
from errno import EINTR, EMSGSIZE, EAGAIN, EWOULDBLOCK, ECONNREFUSED
from zope.interface import implements, implementsOnly, implementedBy
if not hasattr(socket, 'AF_UNIX'):
raise ImportError("UNIX sockets not supported on this platform")
# Twisted imports
from twisted.internet import base, tcp, udp, error, interfaces, protocol, address
from twisted.internet.error import CannotListenError
from twisted.python import lockfile, log, reflect, failure
class Server(tcp.Server):
def __init__(self, sock, protocol, client, server, sessionno, reactor):
tcp.Server.__init__(self, sock, protocol, (client, None), server, sessionno, reactor)
def getHost(self):
return address.UNIXAddress(self.socket.getsockname())
def getPeer(self):
return address.UNIXAddress(self.hostname)
def _inFilesystemNamespace(path):
"""
Determine whether the given unix socket path is in a filesystem namespace.
While most PF_UNIX sockets are entries in the filesystem, Linux 2.2 and
above support PF_UNIX sockets in an "abstract namespace" that does not
correspond to any path. This function returns C{True} if the given socket
path is stored in the filesystem and C{False} if the path is in this
abstract namespace.
"""
return path[:1] != "\0"
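# A short illustrative helper (not part of Twisted): Linux abstract-namespace
# addresses begin with a NUL byte, so they never appear on disk, while every
# other address is an ordinary filesystem path.  The sample paths are
# hypothetical, e.g. _describe_unix_path("/var/run/example.sock") versus
# _describe_unix_path("\0example").
def _describe_unix_path(p):
    if _inFilesystemNamespace(p):
        return "filesystem socket at %r" % (p,)
    return "abstract-namespace socket %r" % (p[1:],)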
class _UNIXPort(object):
def getHost(self):
"""Returns a UNIXAddress.
This indicates the server's address.
"""
if sys.version_info > (2, 5) or _inFilesystemNamespace(self.port):
path = self.socket.getsockname()
else:
# Abstract namespace sockets aren't well supported on Python 2.4.
# getsockname() always returns ''.
path = self.port
return address.UNIXAddress(path)
class Port(_UNIXPort, tcp.Port):
addressFamily = socket.AF_UNIX
socketType = socket.SOCK_STREAM
transport = Server
lockFile = None
def __init__(self, fileName, factory, backlog=50, mode=0666, reactor=None, wantPID = 0):
tcp.Port.__init__(self, fileName, factory, backlog, reactor=reactor)
self.mode = mode
self.wantPID = wantPID
def __repr__(self):
factoryName = reflect.qual(self.factory.__class__)
if hasattr(self, 'socket'):
return '<%s on %r>' % (factoryName, self.port)
else:
return '<%s (not listening)>' % (factoryName,)
def _buildAddr(self, name):
return address.UNIXAddress(name)
def startListening(self):
"""Create and bind my socket, and begin listening on it.
This is called on unserialization, and must be called after creating a
server to begin listening on the specified port.
"""
log.msg("%s starting on %r" % (self.factory.__class__, repr(self.port)))
if self.wantPID:
self.lockFile = lockfile.FilesystemLock(self.port + ".lock")
if not self.lockFile.lock():
raise CannotListenError, (None, self.port, "Cannot acquire lock")
else:
if not self.lockFile.clean:
try:
# This is a best-attempt at cleaning up
# left-over unix sockets on the filesystem.
# If it fails, there's not much else we can
# do. The bind() below will fail with an
                        # exception that actually propagates.
if stat.S_ISSOCK(os.stat(self.port).st_mode):
os.remove(self.port)
except:
pass
self.factory.doStart()
try:
skt = self.createInternetSocket()
skt.bind(self.port)
except socket.error, le:
raise CannotListenError, (None, self.port, le)
else:
if _inFilesystemNamespace(self.port):
# Make the socket readable and writable to the world.
os.chmod(self.port, self.mode)
skt.listen(self.backlog)
self.connected = True
self.socket = skt
self.fileno = self.socket.fileno
self.numberAccepts = 100
self.startReading()
def _logConnectionLostMsg(self):
"""
Log message for closing socket
"""
log.msg('(UNIX Port %s Closed)' % (repr(self.port),))
def connectionLost(self, reason):
if _inFilesystemNamespace(self.port):
os.unlink(self.port)
if self.lockFile is not None:
self.lockFile.unlock()
tcp.Port.connectionLost(self, reason)
class Client(tcp.BaseClient):
"""A client for Unix sockets."""
addressFamily = socket.AF_UNIX
socketType = socket.SOCK_STREAM
def __init__(self, filename, connector, reactor=None, checkPID = 0):
self.connector = connector
self.realAddress = self.addr = filename
if checkPID and not lockfile.isLocked(filename + ".lock"):
self._finishInit(None, None, error.BadFileError(filename), reactor)
self._finishInit(self.doConnect, self.createInternetSocket(),
None, reactor)
def getPeer(self):
return address.UNIXAddress(self.addr)
def getHost(self):
return address.UNIXAddress(None)
class Connector(base.BaseConnector):
def __init__(self, address, factory, timeout, reactor, checkPID):
base.BaseConnector.__init__(self, factory, timeout, reactor)
self.address = address
self.checkPID = checkPID
def _makeTransport(self):
return Client(self.address, self, self.reactor, self.checkPID)
def getDestination(self):
return address.UNIXAddress(self.address)
class DatagramPort(_UNIXPort, udp.Port):
"""Datagram UNIX port, listening for packets."""
implements(interfaces.IUNIXDatagramTransport)
addressFamily = socket.AF_UNIX
def __init__(self, addr, proto, maxPacketSize=8192, mode=0666, reactor=None):
"""Initialize with address to listen on.
"""
udp.Port.__init__(self, addr, proto, maxPacketSize=maxPacketSize, reactor=reactor)
self.mode = mode
def __repr__(self):
protocolName = reflect.qual(self.protocol.__class__,)
if hasattr(self, 'socket'):
return '<%s on %r>' % (protocolName, self.port)
else:
return '<%s (not listening)>' % (protocolName,)
def _bindSocket(self):
log.msg("%s starting on %s"%(self.protocol.__class__, repr(self.port)))
try:
skt = self.createInternetSocket() # XXX: haha misnamed method
if self.port:
skt.bind(self.port)
except socket.error, le:
raise error.CannotListenError, (None, self.port, le)
if self.port and _inFilesystemNamespace(self.port):
# Make the socket readable and writable to the world.
os.chmod(self.port, self.mode)
self.connected = 1
self.socket = skt
self.fileno = self.socket.fileno
def write(self, datagram, address):
"""Write a datagram."""
try:
return self.socket.sendto(datagram, address)
except socket.error, se:
no = se.args[0]
if no == EINTR:
return self.write(datagram, address)
elif no == EMSGSIZE:
raise error.MessageLengthError, "message too long"
elif no == EAGAIN:
# oh, well, drop the data. The only difference from UDP
# is that UDP won't ever notice.
# TODO: add TCP-like buffering
pass
else:
raise
def connectionLost(self, reason=None):
"""Cleans up my socket.
"""
log.msg('(Port %s Closed)' % repr(self.port))
base.BasePort.connectionLost(self, reason)
if hasattr(self, "protocol"):
# we won't have attribute in ConnectedPort, in cases
# where there was an error in connection process
self.protocol.doStop()
self.connected = 0
self.socket.close()
del self.socket
del self.fileno
if hasattr(self, "d"):
self.d.callback(None)
del self.d
def setLogStr(self):
self.logstr = reflect.qual(self.protocol.__class__) + " (UDP)"
class ConnectedDatagramPort(DatagramPort):
"""
A connected datagram UNIX socket.
"""
implementsOnly(interfaces.IUNIXDatagramConnectedTransport,
*(implementedBy(base.BasePort)))
def __init__(self, addr, proto, maxPacketSize=8192, mode=0666,
bindAddress=None, reactor=None):
assert isinstance(proto, protocol.ConnectedDatagramProtocol)
DatagramPort.__init__(self, bindAddress, proto, maxPacketSize, mode,
reactor)
self.remoteaddr = addr
def startListening(self):
try:
self._bindSocket()
self.socket.connect(self.remoteaddr)
self._connectToProtocol()
except:
self.connectionFailed(failure.Failure())
def connectionFailed(self, reason):
"""
Called when a connection fails. Stop listening on the socket.
@type reason: L{Failure}
@param reason: Why the connection failed.
"""
self.stopListening()
self.protocol.connectionFailed(reason)
del self.protocol
def doRead(self):
"""
Called when my socket is ready for reading.
"""
read = 0
while read < self.maxThroughput:
try:
data, addr = self.socket.recvfrom(self.maxPacketSize)
read += len(data)
self.protocol.datagramReceived(data)
except socket.error, se:
no = se.args[0]
if no in (EAGAIN, EINTR, EWOULDBLOCK):
return
if no == ECONNREFUSED:
self.protocol.connectionRefused()
else:
raise
except:
log.deferr()
def write(self, data):
"""
Write a datagram.
"""
try:
return self.socket.send(data)
except socket.error, se:
no = se.args[0]
if no == EINTR:
return self.write(data)
elif no == EMSGSIZE:
raise error.MessageLengthError, "message too long"
elif no == ECONNREFUSED:
self.protocol.connectionRefused()
elif no == EAGAIN:
# oh, well, drop the data. The only difference from UDP
# is that UDP won't ever notice.
# TODO: add TCP-like buffering
pass
else:
raise
def getPeer(self):
return address.UNIXAddress(self.remoteaddr)
| apache-2.0 |
vladmm/intellij-community | python/lib/Lib/smtplib.py | 87 | 27148 | #! /usr/bin/env python
'''SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> print s.help()
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
[email protected].
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd("vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <[email protected]>")
>>> s.quit()
'''
# Author: The Dragon De Monsyne <[email protected]>
# ESMTP support, test code and doc fixes added by
# Eric S. Raymond <[email protected]>
# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
# by Carey Evans <[email protected]>, for picky mail servers.
# RFC 2554 (authentication) support by Gerhard Haering <[email protected]>.
#
# This was modified from the Python 1.5 library HTTP lib.
import socket
import re
import email.Utils
import base64
import hmac
from email.base64MIME import encode as encode_base64
from sys import stderr
__all__ = ["SMTPException","SMTPServerDisconnected","SMTPResponseException",
"SMTPSenderRefused","SMTPRecipientsRefused","SMTPDataError",
"SMTPConnectError","SMTPHeloError","SMTPAuthenticationError",
"quoteaddr","quotedata","SMTP"]
SMTP_PORT = 25
CRLF="\r\n"
OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
# Exception classes used by this module.
class SMTPException(Exception):
"""Base class for all exceptions raised by this module."""
class SMTPServerDisconnected(SMTPException):
"""Not connected to any SMTP server.
This exception is raised when the server unexpectedly disconnects,
or when an attempt is made to use the SMTP instance before
connecting it to a server.
"""
class SMTPResponseException(SMTPException):
"""Base class for all exceptions that include an SMTP error code.
These exceptions are generated in some instances when the SMTP
server returns an error code. The error code is stored in the
`smtp_code' attribute of the error, and the `smtp_error' attribute
is set to the error message.
"""
def __init__(self, code, msg):
self.smtp_code = code
self.smtp_error = msg
self.args = (code, msg)
class SMTPSenderRefused(SMTPResponseException):
"""Sender address refused.
In addition to the attributes set by on all SMTPResponseException
exceptions, this sets `sender' to the string that the SMTP refused.
"""
def __init__(self, code, msg, sender):
self.smtp_code = code
self.smtp_error = msg
self.sender = sender
self.args = (code, msg, sender)
class SMTPRecipientsRefused(SMTPException):
"""All recipient addresses refused.
The errors for each recipient are accessible through the attribute
'recipients', which is a dictionary of exactly the same sort as
SMTP.sendmail() returns.
"""
def __init__(self, recipients):
self.recipients = recipients
self.args = ( recipients,)
class SMTPDataError(SMTPResponseException):
"""The SMTP server didn't accept the data."""
class SMTPConnectError(SMTPResponseException):
"""Error during connection establishment."""
class SMTPHeloError(SMTPResponseException):
"""The server refused our HELO reply."""
class SMTPAuthenticationError(SMTPResponseException):
"""Authentication error.
Most probably the server didn't accept the username/password
combination provided.
"""
class SSLFakeSocket:
"""A fake socket object that really wraps a SSLObject.
It only supports what is needed in smtplib.
"""
def __init__(self, realsock, sslobj):
self.realsock = realsock
self.sslobj = sslobj
def send(self, str):
self.sslobj.write(str)
return len(str)
sendall = send
def close(self):
self.realsock.close()
class SSLFakeFile:
"""A fake file like object that really wraps a SSLObject.
It only supports what is needed in smtplib.
"""
def __init__(self, sslobj):
self.sslobj = sslobj
def readline(self):
str = ""
chr = None
while chr != "\n":
chr = self.sslobj.read(1)
str += chr
return str
def close(self):
pass
def quoteaddr(addr):
"""Quote a subset of the email addresses defined by RFC 821.
Should be able to handle anything rfc822.parseaddr can handle.
"""
m = (None, None)
try:
m = email.Utils.parseaddr(addr)[1]
except AttributeError:
pass
if m == (None, None): # Indicates parse failure or AttributeError
# something weird here.. punt -ddm
return "<%s>" % addr
elif m is None:
# the sender wants an empty return address
return "<>"
else:
return "<%s>" % m
def quotedata(data):
"""Quote data for email.
Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
Internet CRLF end-of-line.
"""
return re.sub(r'(?m)^\.', '..',
re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
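# A few illustrative checks (not part of the module): quoteaddr() reduces an
# RFC 821 address to its bracketed form and quotedata() applies dot-stuffing
# plus CRLF line endings before the body is sent.  Sample values are
# hypothetical.
def _example_quoting():
    assert quoteaddr("Alice <alice@example.com>") == "<alice@example.com>"
    assert quoteaddr("") == "<>"
    assert quotedata(".hidden\nline two\n") == "..hidden\r\nline two\r\n"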
class SMTP:
"""This class manages a connection to an SMTP or ESMTP server.
SMTP Objects:
SMTP objects have the following attributes:
helo_resp
This is the message given by the server in response to the
most recent HELO command.
ehlo_resp
This is the message given by the server in response to the
most recent EHLO command. This is usually multiline.
does_esmtp
This is a True value _after you do an EHLO command_, if the
server supports ESMTP.
esmtp_features
This is a dictionary, which, if the server supports ESMTP,
will _after you do an EHLO command_, contain the names of the
SMTP service extensions this server supports, and their
parameters (if any).
Note, all extension names are mapped to lower case in the
dictionary.
See each method's docstrings for details. In general, there is a
method of the same name to perform each SMTP command. There is also a
method called 'sendmail' that will do an entire mail transaction.
"""
debuglevel = 0
file = None
helo_resp = None
ehlo_resp = None
does_esmtp = 0
def __init__(self, host = '', port = 0, local_hostname = None):
"""Initialize a new instance.
If specified, `host' is the name of the remote host to which to
connect. If specified, `port' specifies the port to which to connect.
By default, smtplib.SMTP_PORT is used. An SMTPConnectError is raised
if the specified `host' doesn't respond correctly. If specified,
`local_hostname` is used as the FQDN of the local host. By default,
the local hostname is found using socket.getfqdn().
"""
self.esmtp_features = {}
if host:
(code, msg) = self.connect(host, port)
if code != 220:
raise SMTPConnectError(code, msg)
if local_hostname is not None:
self.local_hostname = local_hostname
else:
# RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
# if that can't be calculated, that we should use a domain literal
# instead (essentially an encoded IP address like [A.B.C.D]).
fqdn = socket.getfqdn()
if '.' in fqdn:
self.local_hostname = fqdn
else:
# We can't find an fqdn hostname, so use a domain literal
addr = '127.0.0.1'
try:
addr = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
pass
self.local_hostname = '[%s]' % addr
def set_debuglevel(self, debuglevel):
"""Set the debug output level.
A non-false value results in debug messages for connection and for all
messages sent to and received from the server.
"""
self.debuglevel = debuglevel
def connect(self, host='localhost', port = 0):
"""Connect to a host on a given port.
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host is
specified during instantiation.
"""
if not port and (host.find(':') == host.rfind(':')):
i = host.rfind(':')
if i >= 0:
host, port = host[:i], host[i+1:]
try: port = int(port)
except ValueError:
raise socket.error, "nonnumeric port"
if not port: port = SMTP_PORT
if self.debuglevel > 0: print>>stderr, 'connect:', (host, port)
msg = "getaddrinfo returns an empty list"
self.sock = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
if self.debuglevel > 0: print>>stderr, 'connect:', sa
self.sock.connect(sa)
except socket.error, msg:
if self.debuglevel > 0: print>>stderr, 'connect fail:', msg
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
(code, msg) = self.getreply()
if self.debuglevel > 0: print>>stderr, "connect:", msg
return (code, msg)
def send(self, str):
"""Send `str' to the server."""
if self.debuglevel > 0: print>>stderr, 'send:', repr(str)
if hasattr(self, 'sock') and self.sock:
try:
self.sock.sendall(str)
except socket.error:
self.close()
raise SMTPServerDisconnected('Server not connected')
else:
raise SMTPServerDisconnected('please run connect() first')
def putcmd(self, cmd, args=""):
"""Send a command to the server."""
if args == "":
str = '%s%s' % (cmd, CRLF)
else:
str = '%s %s%s' % (cmd, args, CRLF)
self.send(str)
def getreply(self):
"""Get a reply from the server.
Returns a tuple consisting of:
- server response code (e.g. '250', or such, if all goes well)
Note: returns -1 if it can't read response code.
- server response string corresponding to response code (multiline
responses are converted to a single, multiline string).
Raises SMTPServerDisconnected if end-of-file is reached.
"""
resp=[]
if self.file is None:
self.file = self.sock.makefile('rb')
while 1:
line = self.file.readline()
if line == '':
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed")
if self.debuglevel > 0: print>>stderr, 'reply:', repr(line)
resp.append(line[4:].strip())
code=line[:3]
# Check that the error code is syntactically correct.
# Don't attempt to read a continuation line if it is broken.
try:
errcode = int(code)
except ValueError:
errcode = -1
break
# Check if multiline response.
if line[3:4]!="-":
break
errmsg = "\n".join(resp)
if self.debuglevel > 0:
print>>stderr, 'reply: retcode (%s); Msg: %s' % (errcode,errmsg)
return errcode, errmsg
def docmd(self, cmd, args=""):
"""Send a command, and return its response code."""
self.putcmd(cmd,args)
return self.getreply()
# std smtp commands
def helo(self, name=''):
"""SMTP 'helo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.putcmd("helo", name or self.local_hostname)
(code,msg)=self.getreply()
self.helo_resp=msg
return (code,msg)
def ehlo(self, name=''):
""" SMTP 'ehlo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.esmtp_features = {}
self.putcmd("ehlo", name or self.local_hostname)
(code,msg)=self.getreply()
# According to RFC1869 some (badly written)
# MTA's will disconnect on an ehlo. Toss an exception if
# that happens -ddm
if code == -1 and len(msg) == 0:
self.close()
raise SMTPServerDisconnected("Server not connected")
self.ehlo_resp=msg
if code != 250:
return (code,msg)
self.does_esmtp=1
#parse the ehlo response -ddm
resp=self.ehlo_resp.split('\n')
del resp[0]
for each in resp:
# To be able to communicate with as many SMTP servers as possible,
# we have to take the old-style auth advertisement into account,
# because:
# 1) Else our SMTP feature parser gets confused.
# 2) There are some servers that only advertise the auth methods we
# support using the old style.
auth_match = OLDSTYLE_AUTH.match(each)
if auth_match:
# This doesn't remove duplicates, but that's no problem
self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
+ " " + auth_match.groups(0)[0]
continue
# RFC 1869 requires a space between ehlo keyword and parameters.
# It's actually stricter, in that only spaces are allowed between
# parameters, but were not going to check for that here. Note
# that the space isn't present if there are no parameters.
m=re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?',each)
if m:
feature=m.group("feature").lower()
params=m.string[m.end("feature"):].strip()
if feature == "auth":
self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
+ " " + params
else:
self.esmtp_features[feature]=params
return (code,msg)
def has_extn(self, opt):
"""Does the server support a given SMTP service extension?"""
return opt.lower() in self.esmtp_features
def help(self, args=''):
"""SMTP 'help' command.
Returns help text from server."""
self.putcmd("help", args)
return self.getreply()[1]
def rset(self):
"""SMTP 'rset' command -- resets session."""
return self.docmd("rset")
def noop(self):
"""SMTP 'noop' command -- doesn't do anything :>"""
return self.docmd("noop")
def mail(self,sender,options=[]):
"""SMTP 'mail' command -- begins mail xfer session."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender) ,optionlist))
return self.getreply()
def rcpt(self,recip,options=[]):
"""SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("rcpt","TO:%s%s" % (quoteaddr(recip),optionlist))
return self.getreply()
def data(self,msg):
"""SMTP 'DATA' command -- sends message data to server.
Automatically quotes lines beginning with a period per rfc821.
Raises SMTPDataError if there is an unexpected reply to the
DATA command; the return value from this method is the final
response code received when the all data is sent.
"""
self.putcmd("data")
(code,repl)=self.getreply()
if self.debuglevel >0 : print>>stderr, "data:", (code,repl)
if code != 354:
raise SMTPDataError(code,repl)
else:
q = quotedata(msg)
if q[-2:] != CRLF:
q = q + CRLF
q = q + "." + CRLF
self.send(q)
(code,msg)=self.getreply()
if self.debuglevel >0 : print>>stderr, "data:", (code,msg)
return (code,msg)
def verify(self, address):
"""SMTP 'verify' command -- checks for address validity."""
self.putcmd("vrfy", quoteaddr(address))
return self.getreply()
# a.k.a.
vrfy=verify
def expn(self, address):
"""SMTP 'expn' command -- expands a mailing list."""
self.putcmd("expn", quoteaddr(address))
return self.getreply()
# some useful methods
def login(self, user, password):
"""Log in on an SMTP server that requires authentication.
The arguments are:
- user: The user name to authenticate with.
- password: The password for the authentication.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method will return normally if the authentication was successful.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPAuthenticationError The server didn't accept the username/
password combination.
SMTPException No suitable authentication method was
found.
"""
def encode_cram_md5(challenge, user, password):
challenge = base64.decodestring(challenge)
response = user + " " + hmac.HMAC(password, challenge).hexdigest()
return encode_base64(response, eol="")
def encode_plain(user, password):
return encode_base64("\0%s\0%s" % (user, password), eol="")
AUTH_PLAIN = "PLAIN"
AUTH_CRAM_MD5 = "CRAM-MD5"
AUTH_LOGIN = "LOGIN"
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code, resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
if not self.has_extn("auth"):
raise SMTPException("SMTP AUTH extension not supported by server.")
# Authentication methods the server supports:
authlist = self.esmtp_features["auth"].split()
# List of authentication methods we support: from preferred to
# less preferred methods. Except for the purpose of testing the weaker
# ones, we prefer stronger methods like CRAM-MD5:
preferred_auths = [AUTH_CRAM_MD5, AUTH_PLAIN, AUTH_LOGIN]
# Determine the authentication method we'll use
authmethod = None
for method in preferred_auths:
if method in authlist:
authmethod = method
break
if authmethod == AUTH_CRAM_MD5:
(code, resp) = self.docmd("AUTH", AUTH_CRAM_MD5)
if code == 503:
# 503 == 'Error: already authenticated'
return (code, resp)
(code, resp) = self.docmd(encode_cram_md5(resp, user, password))
elif authmethod == AUTH_PLAIN:
(code, resp) = self.docmd("AUTH",
AUTH_PLAIN + " " + encode_plain(user, password))
elif authmethod == AUTH_LOGIN:
(code, resp) = self.docmd("AUTH",
"%s %s" % (AUTH_LOGIN, encode_base64(user, eol="")))
if code != 334:
raise SMTPAuthenticationError(code, resp)
(code, resp) = self.docmd(encode_base64(password, eol=""))
elif authmethod is None:
raise SMTPException("No suitable authentication method found.")
if code not in (235, 503):
# 235 == 'Authentication successful'
# 503 == 'Error: already authenticated'
raise SMTPAuthenticationError(code, resp)
return (code, resp)
def starttls(self, keyfile = None, certfile = None):
"""Puts the connection to the SMTP server into TLS mode.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked. This,
however, depends on whether the socket module really checks the
certificates.
"""
(resp, reply) = self.docmd("STARTTLS")
if resp == 220:
sslobj = socket.ssl(self.sock, keyfile, certfile)
self.sock = SSLFakeSocket(self.sock, sslobj)
self.file = SSLFakeFile(sslobj)
# RFC 3207:
# The client MUST discard any knowledge obtained from
# the server, such as the list of SMTP service extensions,
# which was not obtained from the TLS negotiation itself.
self.helo_resp = None
self.ehlo_resp = None
self.esmtp_features = {}
self.does_esmtp = 0
return (resp, reply)
def sendmail(self, from_addr, to_addrs, msg, mail_options=[],
rcpt_options=[]):
"""This command performs an entire mail transaction.
The arguments are:
- from_addr : The address sending this mail.
- to_addrs : A list of addresses to send this mail to. A bare
string will be treated as a list with 1 address.
- msg : The message to send.
- mail_options : List of ESMTP options (such as 8bitmime) for the
mail command.
- rcpt_options : List of ESMTP options (such as DSN commands) for
all the rcpt commands.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first. If the server does ESMTP, message size
and each of the specified options will be passed to it. If EHLO
fails, HELO will be tried and ESMTP options suppressed.
This method will return normally if the mail is accepted for at least
one recipient. It returns a dictionary, with one entry for each
recipient that was refused. Each entry contains a tuple of the SMTP
error code and the accompanying error message sent by the server.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPRecipientsRefused The server rejected ALL recipients
(no mail was sent).
SMTPSenderRefused The server didn't accept the from_addr.
SMTPDataError The server replied with an unexpected
error code (other than a refusal of
a recipient).
Note: the connection will be open even after an exception is raised.
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> tolist=["[email protected]","[email protected]","[email protected]","[email protected]"]
>>> msg = '''\\
... From: [email protected]
... Subject: testin'...
...
... This is a test '''
>>> s.sendmail("[email protected]",tolist,msg)
{ "[email protected]" : ( 550 ,"User unknown" ) }
>>> s.quit()
In the above example, the message was accepted for delivery to three
of the four addresses, and one was rejected, with the error code
550. If all addresses are accepted, then the method will return an
empty dictionary.
"""
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code,resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
esmtp_opts = []
if self.does_esmtp:
# Hmmm? what's this? -ddm
# self.esmtp_features['7bit']=""
if self.has_extn('size'):
esmtp_opts.append("size=%d" % len(msg))
for option in mail_options:
esmtp_opts.append(option)
(code,resp) = self.mail(from_addr, esmtp_opts)
if code != 250:
self.rset()
raise SMTPSenderRefused(code, resp, from_addr)
senderrs={}
if isinstance(to_addrs, basestring):
to_addrs = [to_addrs]
for each in to_addrs:
(code,resp)=self.rcpt(each, rcpt_options)
if (code != 250) and (code != 251):
senderrs[each]=(code,resp)
if len(senderrs)==len(to_addrs):
# the server refused all our recipients
self.rset()
raise SMTPRecipientsRefused(senderrs)
(code,resp) = self.data(msg)
if code != 250:
self.rset()
raise SMTPDataError(code, resp)
#if we got here then somebody got our mail
return senderrs
def close(self):
"""Close the connection to the SMTP server."""
if self.file:
self.file.close()
self.file = None
if self.sock:
self.sock.close()
self.sock = None
def quit(self):
"""Terminate the SMTP session."""
self.docmd("quit")
self.close()
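# A compact usage sketch (hypothetical host and credentials; defined but not
# executed): a typical authenticated ESMTP session negotiates STARTTLS first,
# re-issues EHLO, and only then calls login() before sending mail.
def _example_authenticated_session():
    s = SMTP("mail.example.com", 587)
    s.ehlo()
    s.starttls()
    s.ehlo()
    s.login("user", "secret")
    s.sendmail("user@example.com", ["rcpt@example.com"],
               "Subject: hi\r\n\r\nhello")
    s.quit()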
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
if __name__ == '__main__':
import sys
def prompt(prompt):
sys.stdout.write(prompt + ": ")
return sys.stdin.readline().strip()
fromaddr = prompt("From")
toaddrs = prompt("To").split(',')
print "Enter message, end with ^D:"
msg = ''
while 1:
line = sys.stdin.readline()
if not line:
break
msg = msg + line
print "Message length is %d" % len(msg)
server = SMTP('localhost')
server.set_debuglevel(1)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
| apache-2.0 |
abdellatifkarroum/odoo | addons/crm_partner_assign/crm_lead.py | 112 | 2985 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class crm_lead(osv.osv):
_inherit = 'crm.lead'
def get_interested_action(self, cr, uid, interested, context=None):
try:
model, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'crm_lead_channel_interested_act')
except ValueError:
raise osv.except_osv(_('Error!'), _("The CRM Channel Interested Action is missing"))
action = self.pool[model].read(cr, uid, [action_id], context=context)[0]
action_context = eval(action['context'])
action_context['interested'] = interested
action['context'] = str(action_context)
return action
def case_interested(self, cr, uid, ids, context=None):
return self.get_interested_action(cr, uid, True, context=context)
def case_disinterested(self, cr, uid, ids, context=None):
return self.get_interested_action(cr, uid, False, context=context)
def assign_salesman_of_assigned_partner(self, cr, uid, ids, context=None):
salesmans_leads = {}
for lead in self.browse(cr, uid, ids, context=context):
if (lead.stage_id.probability > 0 and lead.stage_id.probability < 100) or lead.stage_id.sequence == 1:
if lead.partner_assigned_id and lead.partner_assigned_id.user_id and lead.partner_assigned_id.user_id != lead.user_id:
salesman_id = lead.partner_assigned_id.user_id.id
if salesmans_leads.get(salesman_id):
salesmans_leads[salesman_id].append(lead.id)
else:
salesmans_leads[salesman_id] = [lead.id]
for salesman_id, lead_ids in salesmans_leads.items():
salesteam_id = self.on_change_user(cr, uid, lead_ids, salesman_id, context=None)['value'].get('section_id')
self.write(cr, uid, lead_ids, {'user_id': salesman_id, 'section_id': salesteam_id}, context=context)
| agpl-3.0 |
qenter/vlc-android | toolchains/arm/lib/python2.7/os.py | 147 | 25769 | r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
islink, join, isdir = path.islink, path.join, path.isdir
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
for x in walk(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error, e:
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
putenv(key, item)
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
try:
unsetenv
except NameError:
def __delitem__(self, key):
del self.data[key.upper()]
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key.upper()]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key.upper(), *args)
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
else: # Where Env Var Names Can Be Mixed Case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
self.data = environ
def __setitem__(self, key, item):
putenv(key, item)
self.data[key] = item
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
try:
unsetenv
except NameError:
pass
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key, *args)
def copy(self):
return dict(self)
environ = _Environ(environ)
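# Illustrative sketch (not part of this module): because environ is the wrapper
# built above, plain mapping operations also call putenv()/unsetenv(), so changes
# are visible to child processes. The variable name is made up for the example.
#
#     import os
#     os.environ['MY_APP_MODE'] = 'debug'   # stored in the mapping *and* exported
#     # children started via os.system()/spawn* now see MY_APP_MODE=debug
#     del os.environ['MY_APP_MODE']         # calls unsetenv() where available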
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
return name in globals()
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import warnings
msg = "os.popen2 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
close_fds=True)
return p.stdin, p.stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import warnings
msg = "os.popen3 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=PIPE, close_fds=True)
return p.stdin, p.stdout, p.stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import warnings
msg = "os.popen4 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=subprocess.STDOUT, close_fds=True)
return p.stdin, p.stdout
__all__.append("popen4")
import copy_reg as _copy_reg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
| gpl-2.0 |
cgstudiomap/cgstudiomap | main/parts/odoo/addons/web_linkedin/web_linkedin.py | 333 | 4485 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import urllib2
from urlparse import urlparse, urlunparse
import openerp
import openerp.addons.web
from openerp.osv import fields, osv
class Binary(openerp.http.Controller):
@openerp.http.route('/web_linkedin/binary/url2binary', type='json', auth='user')
def url2binary(self, url):
"""Used exclusively to load images from LinkedIn profiles, must not be used for anything else."""
_scheme, _netloc, path, params, query, fragment = urlparse(url)
# media.linkedin.com is the master domain for LinkedIn media (replicated to CDNs),
# so forcing it should always work and prevents abusing this method to load arbitrary URLs
url = urlunparse(('http', 'media.licdn.com', path, params, query, fragment))
bfile = urllib2.urlopen(url)
return base64.b64encode(bfile.read())
class web_linkedin_settings(osv.osv_memory):
_inherit = 'sale.config.settings'
_columns = {
'api_key': fields.char(string="API Key", size=50),
'server_domain': fields.char(),
}
def get_default_linkedin(self, cr, uid, fields, context=None):
key = self.pool.get("ir.config_parameter").get_param(cr, uid, "web.linkedin.apikey") or ""
dom = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
return {'api_key': key, 'server_domain': dom,}
def set_linkedin(self, cr, uid, ids, context=None):
key = self.browse(cr, uid, ids[0], context)["api_key"] or ""
self.pool.get("ir.config_parameter").set_param(cr, uid, "web.linkedin.apikey", key, groups=['base.group_user'])
class web_linkedin_fields(osv.Model):
_inherit = 'res.partner'
def _get_url(self, cr, uid, ids, name, arg, context=None):
res = dict((id, False) for id in ids)
for partner in self.browse(cr, uid, ids, context=context):
res[partner.id] = partner.linkedin_url
return res
def linkedin_check_similar_partner(self, cr, uid, linkedin_datas, context=None):
res = []
res_partner = self.pool.get('res.partner')
for linkedin_data in linkedin_datas:
partner_ids = res_partner.search(cr, uid, ["|", ("linkedin_id", "=", linkedin_data['id']),
"&", ("linkedin_id", "=", False),
"|", ("name", "ilike", linkedin_data['firstName'] + "%" + linkedin_data['lastName']), ("name", "ilike", linkedin_data['lastName'] + "%" + linkedin_data['firstName'])], context=context)
if partner_ids:
partner = res_partner.read(cr, uid, partner_ids[0], ["image", "mobile", "phone", "parent_id", "name", "email", "function", "linkedin_id"], context=context)
if partner['linkedin_id'] and partner['linkedin_id'] != linkedin_data['id']:
partner.pop('id')
if partner['parent_id']:
partner['parent_id'] = partner['parent_id'][0]
for key, val in partner.items():
if not val:
partner.pop(key)
res.append(partner)
else:
res.append({})
return res
_columns = {
'linkedin_id': fields.char(string="LinkedIn ID"),
'linkedin_url': fields.char(string="LinkedIn url", store=True),
'linkedin_public_url': fields.function(_get_url, type='text', string="LinkedIn url",
help="This url is set automatically when you join the partner with a LinkedIn account."),
}
| agpl-3.0 |
ecrespo/django_kanban-agile | kanban/lib/python2.7/site-packages/setuptools/tests/test_windows_wrappers.py | 151 | 6154 | """
Python Script Wrapper for Windows
=================================
setuptools includes wrappers for Python scripts that allows them to be
executed like regular windows programs. There are 2 wrappers, one
for command-line programs, cli.exe, and one for graphical programs,
gui.exe. These programs are almost identical, function pretty much
the same way, and are generated from the same source file. The
wrapper programs are used by copying them to the directory containing
the script they are to wrap and with the same name as the script they
are to wrap.
"""
from __future__ import absolute_import
import sys
import textwrap
import subprocess
import pytest
from setuptools.command.easy_install import nt_quote_arg
import pkg_resources
pytestmark = pytest.mark.skipif(sys.platform != 'win32', reason="Windows only")
class WrapperTester:
@classmethod
def prep_script(cls, template):
python_exe = nt_quote_arg(sys.executable)
return template % locals()
@classmethod
def create_script(cls, tmpdir):
"""
Create a simple script, foo-script.py
Note that the script starts with a Unix-style '#!' line saying which
Python executable to run. The wrapper will use this line to find the
correct Python executable.
"""
script = cls.prep_script(cls.script_tmpl)
with (tmpdir / cls.script_name).open('w') as f:
f.write(script)
# also copy cli.exe to the sample directory
with (tmpdir / cls.wrapper_name).open('wb') as f:
w = pkg_resources.resource_string('setuptools', cls.wrapper_source)
f.write(w)
class TestCLI(WrapperTester):
script_name = 'foo-script.py'
wrapper_source = 'cli-32.exe'
wrapper_name = 'foo.exe'
script_tmpl = textwrap.dedent("""
#!%(python_exe)s
import sys
input = repr(sys.stdin.read())
print(sys.argv[0][-14:])
print(sys.argv[1:])
print(input)
if __debug__:
print('non-optimized')
""").lstrip()
def test_basic(self, tmpdir):
"""
When the copy of cli.exe, foo.exe in this example, runs, it examines
the path name it was run with and computes a Python script path name
by removing the '.exe' suffix and adding the '-script.py' suffix. (For
GUI programs, the suffix '-script.pyw' is added.) This is why we
        named our script the way we did. Now we can run our script by running
the wrapper:
This example was a little pathological in that it exercised windows
(MS C runtime) quoting rules:
- Strings containing spaces are surrounded by double quotes.
- Double quotes in strings need to be escaped by preceding them with
back slashes.
- One or more backslashes preceding double quotes need to be escaped
by preceding each of them with back slashes.
"""
self.create_script(tmpdir)
cmd = [
str(tmpdir / 'foo.exe'),
'arg1',
'arg 2',
'arg "2\\"',
'arg 4\\',
'arg5 a\\\\b',
]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = proc.communicate('hello\nworld\n'.encode('ascii'))
actual = stdout.decode('ascii').replace('\r\n', '\n')
expected = textwrap.dedent(r"""
\foo-script.py
['arg1', 'arg 2', 'arg "2\\"', 'arg 4\\', 'arg5 a\\\\b']
'hello\nworld\n'
non-optimized
""").lstrip()
assert actual == expected
def test_with_options(self, tmpdir):
"""
Specifying Python Command-line Options
--------------------------------------
You can specify a single argument on the '#!' line. This can be used
to specify Python options like -O, to run in optimized mode or -i
to start the interactive interpreter. You can combine multiple
options as usual. For example, to run in optimized mode and
enter the interpreter after running the script, you could use -Oi:
"""
self.create_script(tmpdir)
tmpl = textwrap.dedent("""
#!%(python_exe)s -Oi
import sys
input = repr(sys.stdin.read())
print(sys.argv[0][-14:])
print(sys.argv[1:])
print(input)
if __debug__:
print('non-optimized')
sys.ps1 = '---'
""").lstrip()
with (tmpdir / 'foo-script.py').open('w') as f:
f.write(self.prep_script(tmpl))
cmd = [str(tmpdir / 'foo.exe')]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = proc.communicate()
actual = stdout.decode('ascii').replace('\r\n', '\n')
expected = textwrap.dedent(r"""
\foo-script.py
[]
''
---
""").lstrip()
assert actual == expected
class TestGUI(WrapperTester):
"""
Testing the GUI Version
-----------------------
"""
script_name = 'bar-script.pyw'
wrapper_source = 'gui-32.exe'
wrapper_name = 'bar.exe'
script_tmpl = textwrap.dedent("""
#!%(python_exe)s
import sys
f = open(sys.argv[1], 'wb')
bytes_written = f.write(repr(sys.argv[2]).encode('utf-8'))
f.close()
""").strip()
def test_basic(self, tmpdir):
"""Test the GUI version with the simple scipt, bar-script.py"""
self.create_script(tmpdir)
cmd = [
str(tmpdir / 'bar.exe'),
str(tmpdir / 'test_output.txt'),
'Test Argument',
]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = proc.communicate()
assert not stdout
assert not stderr
with (tmpdir / 'test_output.txt').open('rb') as f_out:
actual = f_out.read().decode('ascii')
assert actual == repr('Test Argument')
| mit |
jiasir/redis-ha | redis_ha_installer_el6.py | 1 | 3062 | #!/usr/bin/env python
# Install redis server
# Usage: sudo python redis_ha_installer_el6.py [master|backup]
# Author: jiasir (Taio Jia)
# E-Mail: [email protected]
# License: The MIT License
import os
import sys
import shutil
import logging
from command import Command
run = Command()
def usage():
print 'Usage: sudo python redis_ha_installer_el6.py <master|backup>'
def install_epel():
run.execute_get_output('rpm', '-ivh', 'http://mirrors.yun-idc.com/epel/6/i386/epel-release-6-8.noarch.rpm')
def start_instace():
run.execute_get_output('sudo', 'service', 'redis', 'start')
run.execute_get_output('sudo', 'service', 'keepalived', 'start')
def install_keepalived():
try:
with open('/etc/sysctl.conf', 'a') as f:
f.write('net.ipv4.ip_nonlocal_bind = 1')
except IOError:
print IOError.__doc__
try:
with open('/etc/rc.local', 'a') as f:
f.write('ulimit -SHn 65535')
except IOError:
print IOError.__doc__
run.execute_get_output('sudo', 'sysctl', '-p')
run.execute_get_output('sudo', 'ulimit', '-SHn', '65535')
run.execute_get_output('sudo', 'yum', '-y', 'install', 'keepalived')
def install_redis():
try:
with open('/etc/sysctl.conf', 'a') as f:
f.write('vm.overcommit_memory = 1')
except IOError:
print IOError.__doc__
run.execute_get_output('sudo', 'sysctl', '-p')
run.execute_get_output('sudo', 'yum', '-y', 'install', 'redis')
def copy_keepalived_master_conf():
shutil.copy('conf/keepalived.conf.master', '/etc/keepalived/keepalived.conf')
print '[OK] Create keepalived config file: /etc/keepalived/keepalived.conf'
def copy_keepalived_backup_conf():
shutil.copy('conf/keepalived.conf.backup', '/etc/keepalived/keepalived.conf')
print '[OK] Create keepalived config file: /etc/keepalived/keepalived.conf'
def copy_redis_master_conf():
shutil.copy('conf/redis.conf.master', '/etc/redis/redis.conf')
print '[OK] Create redis config file: /etc/redis/redis.conf'
def copy_redis_slave_conf():
shutil.copy('conf/redis.conf.slave', '/etc/redis/redis.conf')
print '[OK] Create redis config file: /etc/redis/redis.conf'
def copy_fail_over_script():
shutil.copy('tools/redis.sh', '/var/lib/redis/redis.sh')
print '[OK] Create fail-over script: /var/lib/redis/redis.sh'
def main():
if len(sys.argv) > 1:
option = sys.argv[1]
if option == "master":
install_epel()
install_keepalived()
install_redis()
copy_keepalived_master_conf()
copy_redis_master_conf()
copy_fail_over_script()
            start_instance()
elif option == "backup":
install_epel()
install_keepalived()
install_redis()
copy_keepalived_backup_conf()
copy_redis_slave_conf()
copy_fail_over_script()
            start_instance()
        else:
            usage()
    else:
        usage()
if __name__ == '__main__':
if os.getuid() == 0:
main()
else:
print 'You do not have permission'
usage()
exit() | mit |
gelisam/evercal | lib/thrift/server/TNonblockingServer.py | 83 | 10950 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Implementation of non-blocking server.
The main idea of the server is receiving and sending requests
only from the main thread.
It also makes the thread pool work in terms of tasks, not connections.
"""
import threading
import socket
import Queue
import select
import struct
import logging
from thrift.transport import TTransport
from thrift.protocol.TBinaryProtocol import TBinaryProtocolFactory
__all__ = ['TNonblockingServer']
class Worker(threading.Thread):
"""Worker is a small helper to process incoming connection."""
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
"""Process queries from task queue, stop if processor is None."""
while True:
try:
processor, iprot, oprot, otrans, callback = self.queue.get()
if processor is None:
break
processor.process(iprot, oprot)
callback(True, otrans.getvalue())
except Exception:
logging.exception("Exception while processing request")
callback(False, '')
WAIT_LEN = 0
WAIT_MESSAGE = 1
WAIT_PROCESS = 2
SEND_ANSWER = 3
CLOSED = 4
def locked(func):
"Decorator which locks self.lock."
def nested(self, *args, **kwargs):
self.lock.acquire()
try:
return func(self, *args, **kwargs)
finally:
self.lock.release()
return nested
def socket_exception(func):
"Decorator close object on socket.error."
def read(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except socket.error:
self.close()
return read
class Connection:
"""Basic class is represented connection.
It can be in state:
WAIT_LEN --- connection is reading request len.
WAIT_MESSAGE --- connection is reading request.
WAIT_PROCESS --- connection has just read whole request and
waits for call ready routine.
SEND_ANSWER --- connection is sending answer string (including length
of answer).
CLOSED --- socket was closed and connection should be deleted.
"""
def __init__(self, new_socket, wake_up):
self.socket = new_socket
self.socket.setblocking(False)
self.status = WAIT_LEN
self.len = 0
self.message = ''
self.lock = threading.Lock()
self.wake_up = wake_up
def _read_len(self):
"""Reads length of request.
        It's a really paranoid routine and it may be replaced by
self.socket.recv(4)."""
read = self.socket.recv(4 - len(self.message))
if len(read) == 0:
            # if we read 0 bytes and self.message is empty, it means the client
            # closed the connection
if len(self.message) != 0:
logging.error("can't read frame size from socket")
self.close()
return
self.message += read
if len(self.message) == 4:
self.len, = struct.unpack('!i', self.message)
if self.len < 0:
logging.error("negative frame size, it seems client"\
" doesn't use FramedTransport")
self.close()
elif self.len == 0:
logging.error("empty frame, it's really strange")
self.close()
else:
self.message = ''
self.status = WAIT_MESSAGE
@socket_exception
def read(self):
"""Reads data from stream and switch state."""
assert self.status in (WAIT_LEN, WAIT_MESSAGE)
if self.status == WAIT_LEN:
self._read_len()
# go back to the main loop here for simplicity instead of
# falling through, even though there is a good chance that
# the message is already available
elif self.status == WAIT_MESSAGE:
read = self.socket.recv(self.len - len(self.message))
if len(read) == 0:
logging.error("can't read frame from socket (get %d of %d bytes)" %
(len(self.message), self.len))
self.close()
return
self.message += read
if len(self.message) == self.len:
self.status = WAIT_PROCESS
@socket_exception
def write(self):
"""Writes data from socket and switch state."""
assert self.status == SEND_ANSWER
sent = self.socket.send(self.message)
if sent == len(self.message):
self.status = WAIT_LEN
self.message = ''
self.len = 0
else:
self.message = self.message[sent:]
@locked
def ready(self, all_ok, message):
"""Callback function for switching state and waking up main thread.
        This function is the only function which can be called asynchronously.
        ready() can switch the Connection to three states:
WAIT_LEN if request was oneway.
SEND_ANSWER if request was processed in normal way.
CLOSED if request throws unexpected exception.
        It also wakes up the main thread.
"""
assert self.status == WAIT_PROCESS
if not all_ok:
self.close()
self.wake_up()
return
self.len = ''
if len(message) == 0:
# it was a oneway request, do not write answer
self.message = ''
self.status = WAIT_LEN
else:
self.message = struct.pack('!i', len(message)) + message
self.status = SEND_ANSWER
self.wake_up()
@locked
def is_writeable(self):
"Returns True if connection should be added to write list of select."
return self.status == SEND_ANSWER
# it's not necessary, but...
@locked
def is_readable(self):
"Returns True if connection should be added to read list of select."
return self.status in (WAIT_LEN, WAIT_MESSAGE)
@locked
def is_closed(self):
"Returns True if connection is closed."
return self.status == CLOSED
def fileno(self):
"Returns the file descriptor of the associated socket."
return self.socket.fileno()
def close(self):
"Closes connection"
self.status = CLOSED
self.socket.close()
class TNonblockingServer:
"""Non-blocking server."""
def __init__(self, processor, lsocket, inputProtocolFactory=None,
outputProtocolFactory=None, threads=10):
self.processor = processor
self.socket = lsocket
self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
self.out_protocol = outputProtocolFactory or self.in_protocol
self.threads = int(threads)
self.clients = {}
self.tasks = Queue.Queue()
self._read, self._write = socket.socketpair()
self.prepared = False
def setNumThreads(self, num):
"""Set the number of worker threads that should be created."""
# implement ThreadPool interface
        assert not self.prepared, "You can't change the number of threads for a working server"
self.threads = num
def prepare(self):
"""Prepares server for serve requests."""
self.socket.listen()
for _ in xrange(self.threads):
thread = Worker(self.tasks)
thread.setDaemon(True)
thread.start()
self.prepared = True
def wake_up(self):
"""Wake up main thread.
        The server usually waits in a select call and we should terminate it.
The simplest way is using socketpair.
Select always wait to read from the first socket of socketpair.
In this case, we can just write anything to the second socket from
socketpair."""
self._write.send('1')
def _select(self):
"""Does select on open connections."""
readable = [self.socket.handle.fileno(), self._read.fileno()]
writable = []
for i, connection in self.clients.items():
if connection.is_readable():
readable.append(connection.fileno())
if connection.is_writeable():
writable.append(connection.fileno())
if connection.is_closed():
del self.clients[i]
return select.select(readable, writable, readable)
def handle(self):
"""Handle requests.
WARNING! You must call prepare BEFORE calling handle.
"""
assert self.prepared, "You have to call prepare before handle"
rset, wset, xset = self._select()
for readable in rset:
if readable == self._read.fileno():
# don't care i just need to clean readable flag
self._read.recv(1024)
elif readable == self.socket.handle.fileno():
client = self.socket.accept().handle
self.clients[client.fileno()] = Connection(client, self.wake_up)
else:
connection = self.clients[readable]
connection.read()
if connection.status == WAIT_PROCESS:
itransport = TTransport.TMemoryBuffer(connection.message)
otransport = TTransport.TMemoryBuffer()
iprot = self.in_protocol.getProtocol(itransport)
oprot = self.out_protocol.getProtocol(otransport)
self.tasks.put([self.processor, iprot, oprot,
otransport, connection.ready])
for writeable in wset:
self.clients[writeable].write()
for oob in xset:
self.clients[oob].close()
del self.clients[oob]
def close(self):
"""Closes the server."""
for _ in xrange(self.threads):
self.tasks.put([None, None, None, None, None])
self.socket.close()
self.prepared = False
def serve(self):
"""Serve forever."""
self.prepare()
while True:
self.handle()
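# Example usage (illustrative sketch, not part of this module). "ExampleService"
# and "ExampleHandler" are placeholder names for Thrift-generated code and your
# service implementation; clients must talk to this server through a
# TFramedTransport, since requests are read as 4-byte length-prefixed frames.
#
#     from thrift.transport import TSocket
#     from example import ExampleService          # hypothetical generated module
#
#     handler = ExampleHandler()
#     processor = ExampleService.Processor(handler)
#     lsocket = TSocket.TServerSocket(port=9090)
#     server = TNonblockingServer(processor, lsocket, threads=4)
#     server.serve()                              # calls prepare() then loops on handle()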
| bsd-2-clause |
edx/ecommerce | ecommerce/extensions/dashboard/users/tests/test_views.py | 1 | 3915 |
import json
import httpretty
import mock
from django.contrib.messages import constants as MSG
from django.urls import reverse
from requests import Timeout
from testfixtures import LogCapture
from ecommerce.core.tests import toggle_switch
from ecommerce.core.url_utils import get_lms_enrollment_api_url
from ecommerce.extensions.dashboard.tests import DashboardViewTestMixin
from ecommerce.extensions.dashboard.users.views import UserDetailView
from ecommerce.tests.testcases import TestCase
LOGGER_NAME = 'ecommerce.extensions.dashboard.users.views'
class UserDetailViewTests(DashboardViewTestMixin, TestCase):
def setUp(self):
super(UserDetailViewTests, self).setUp()
self.switch = toggle_switch('user_enrollments_on_dashboard', True)
self.user = self.create_user(is_staff=True)
self.client.login(username=self.user.username, password=self.password)
self.data = [{'course_id': 'a/b/c'}]
def mock_enrollment_api(self, status=200):
self.assertTrue(httpretty.is_enabled())
httpretty.register_uri(httpretty.GET, get_lms_enrollment_api_url(), status=status,
body=json.dumps(self.data),
content_type='application/json')
def load_view(self):
return self.client.get(reverse('dashboard:user-detail', args=[self.user.id]))
@httpretty.activate
def test_enrollments(self):
""" Verify the view retrieves data from the Enrollment API. """
self.mock_enrollment_api()
response = self.load_view()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['enrollments'], self.data)
def test_enrollments_switch_inactive(self):
""" Verify enrollment data is NOT returned if the user_enrollments_on_dashboard switch is NOT active. """
self.switch.active = False
self.switch.save()
mock_get_enrollments = mock.Mock()
with mock.patch.object(UserDetailView, '_get_enrollments', mock_get_enrollments):
response = self.load_view()
self.assertFalse(mock_get_enrollments.called)
self.assertEqual(response.status_code, 200)
self.assertNotIn('enrollments', response.context)
@httpretty.activate
def test_enrollments_bad_response(self):
"""Verify a message is logged, and a separate message displayed to the user,
        if the API does not return HTTP 200."""
api_status = 500
self.mock_enrollment_api(status=api_status)
with LogCapture(LOGGER_NAME) as logger:
response = self.load_view()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['enrollments'], [])
self.assert_message_equals(response, 'Failed to retrieve enrollment data.', MSG.ERROR)
logger.check((
LOGGER_NAME,
'WARNING',
'Failed to retrieve enrollments for [{}]. Enrollment API returned status code [{}].'.format(
self.user.username,
api_status
)
))
@mock.patch('requests.get', mock.Mock(side_effect=Timeout))
def test_enrollments_exception(self):
"""Verify a message is logged, and a separate message displayed to the user,
if an exception is raised while retrieving enrollments."""
with LogCapture(LOGGER_NAME) as logger:
response = self.load_view()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['enrollments'], [])
self.assert_message_equals(response, 'Failed to retrieve enrollment data.', MSG.ERROR)
logger.check((
LOGGER_NAME,
'ERROR',
'An unexpected error occurred while retrieving enrollments for [{}].'.format(self.user.username)
))
| agpl-3.0 |
MostafaGazar/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/quantized_distribution_test.py | 4 | 18061 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
import tensorflow as tf
distributions = tf.contrib.distributions
class QuantizedDistributionTest(tf.test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(0)
def _assert_all_finite(self, array):
self.assertTrue(np.isfinite(array).all())
def test_quantization_of_uniform_with_cutoffs_having_no_effect(self):
with self.test_session() as sess:
# The Quantized uniform with cutoffs == None divides the real line into:
# R = ...(-1, 0](0, 1](1, 2](2, 3](3, 4]...
# j = ... 0 1 2 3 4 ...
# Since this uniform (below) is supported on [0, 3],
# it places 1/3 of its mass in the intervals j = 1, 2, 3.
# Adding a cutoff at y = 0 changes the picture to
# R = ...(-inf, 0](0, 1](1, 2](2, 3](3, 4]...
# j = ... 0 1 2 3 4 ...
# So the QUniform still places 1/3 of its mass in the intervals
# j = 1, 2, 3.
# Adding a cutoff at y = 3 changes the picture to
# R = ...(-1, 0](0, 1](1, 2](2, inf)
# j = ... 0 1 2 3
# and the QUniform still places 1/3 of its mass in the intervals
# j = 1, 2, 3.
for lcut, ucut in [
(None, None), (0.0, None), (None, 3.0), (0.0, 3.0), (-10., 10.)
]:
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Uniform,
lower_cutoff=lcut,
upper_cutoff=ucut,
a=0.0,
b=3.0)
# pmf
pmf_n1, pmf_0, pmf_1, pmf_2, pmf_3, pmf_4, pmf_5 = sess.run(
qdist.pmf([-1., 0., 1., 2., 3., 4., 5.]))
# uniform had no mass below -1.
self.assertAllClose(0., pmf_n1)
# uniform had no mass below 0.
self.assertAllClose(0., pmf_0)
# uniform put 1/3 of its mass in each of (0, 1], (1, 2], (2, 3],
# which are the intervals j = 1, 2, 3.
self.assertAllClose(1 / 3, pmf_1)
self.assertAllClose(1 / 3, pmf_2)
self.assertAllClose(1 / 3, pmf_3)
# uniform had no mass in (3, 4] or (4, 5], which are j = 4, 5.
self.assertAllClose(0 / 3, pmf_4)
self.assertAllClose(0 / 3, pmf_5)
# cdf
cdf_n1, cdf_0, cdf_1, cdf_2, cdf_2p5, cdf_3, cdf_4, cdf_5 = sess.run(
qdist.cdf([-1., 0., 1., 2., 2.5, 3., 4., 5.]))
self.assertAllClose(0., cdf_n1)
self.assertAllClose(0., cdf_0)
self.assertAllClose(1 / 3, cdf_1)
self.assertAllClose(2 / 3, cdf_2)
# Note fractional values allowed for cdfs of discrete distributions.
# And adding 0.5 makes no difference because the quantized dist has
# mass only on the integers, never in between.
self.assertAllClose(2 / 3, cdf_2p5)
self.assertAllClose(3 / 3, cdf_3)
self.assertAllClose(3 / 3, cdf_4)
self.assertAllClose(3 / 3, cdf_5)
def test_quantization_of_uniform_with_cutoffs_in_the_middle(self):
with self.test_session() as sess:
# The uniform is supported on [-3, 3]
# Consider partitions the real line in intervals
# ...(-3, -2](-2, -1](-1, 0](0, 1](1, 2](2, 3] ...
# Before cutoffs, the uniform puts a mass of 1/6 in each interval written
# above. Because of cutoffs, the qdist considers intervals and indices
# ...(-infty, -1](-1, 0](0, infty) ...
# -1 0 1
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Uniform,
lower_cutoff=-1.0,
upper_cutoff=1.0,
a=-3.0,
b=3.0)
# pmf
cdf_n3, cdf_n2, cdf_n1, cdf_0, cdf_0p5, cdf_1, cdf_10 = sess.run(
qdist.cdf([-3., -2., -1., 0., 0.5, 1.0, 10.0]))
# Uniform had no mass on (-4, -3] or (-3, -2]
self.assertAllClose(0., cdf_n3)
self.assertAllClose(0., cdf_n2)
# Uniform had 1/6 of its mass in each of (-3, -2], and (-2, -1], which
# were collapsed into (-infty, -1], which is now the "-1" interval.
self.assertAllClose(1 / 3, cdf_n1)
# The j=0 interval contained mass from (-3, 0], which is 1/2 of the
# uniform's mass.
self.assertAllClose(1 / 2, cdf_0)
# Adding 0.5 makes no difference because the quantized dist has mass on
# the integers, not in between them.
self.assertAllClose(1 / 2, cdf_0p5)
# After applying the cutoff, all mass was either in the interval
# (0, infty), or below. (0, infty) is the interval indexed by j=1,
# so pmf(1) should equal 1.
self.assertAllClose(1., cdf_1)
# Since no mass of qdist is above 1,
# pmf(10) = P[Y <= 10] = P[Y <= 1] = pmf(1).
self.assertAllClose(1., cdf_10)
def test_quantization_of_batch_of_uniforms(self):
batch_shape = (5, 5)
with self.test_session():
# The uniforms are supported on [0, 10]. The qdist considers the
# intervals
# ... (0, 1](1, 2]...(9, 10]...
# with the intervals displayed above each holding 1 / 10 of the mass.
# The qdist will be defined with no cutoffs,
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Uniform,
lower_cutoff=None,
upper_cutoff=None,
a=tf.zeros(
batch_shape, dtype=tf.float32),
b=10 * tf.ones(
batch_shape, dtype=tf.float32))
# x is random integers in {-3,...,12}.
x = self._rng.randint(-3, 13, size=batch_shape).astype(np.float32)
# pmf
# qdist.pmf(j) = 1 / 10 for j in {1,...,10}, and 0 otherwise,
expected_pmf = (1 / 10) * np.ones(batch_shape)
expected_pmf[x < 1] = 0.
expected_pmf[x > 10] = 0.
self.assertAllClose(expected_pmf, qdist.pmf(x).eval())
# cdf
# qdist.cdf(j)
# = 0 for j < 1
# = j / 10, for j in {1,...,10},
# = 1, for j > 10.
expected_cdf = x.copy() / 10
expected_cdf[x < 1] = 0.
expected_cdf[x > 10] = 1.
self.assertAllClose(expected_cdf, qdist.cdf(x).eval())
def test_sampling_from_batch_of_normals(self):
batch_shape = (2,)
with self.test_session():
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Normal,
lower_cutoff=0.,
upper_cutoff=None,
mu=tf.zeros(
batch_shape, dtype=tf.float32),
sigma=tf.ones(
batch_shape, dtype=tf.float32))
samps = qdist.sample_n(n=5000, seed=42)
samps_v = samps.eval()
# With lower_cutoff = 0, the interval j=0 is (-infty, 0], which holds 1/2
# of the mass of the normals.
# rtol chosen to be 2x as large as necessary to pass.
self.assertAllClose([0.5, 0.5], (samps_v == 0).mean(axis=0), rtol=0.03)
# The interval j=1 is (0, 1], which is from the mean to one standard
# deviation out. This should contain 0.6827 / 2 of the mass.
self.assertAllClose(
[0.6827 / 2, 0.6827 / 2], (samps_v == 1).mean(axis=0), rtol=0.03)
def test_samples_agree_with_cdf_for_samples_over_large_range(self):
# Consider the cdf for distribution X, F(x).
# If U ~ Uniform[0, 1], then Y := F^{-1}(U) is distributed like X since
# P[Y <= y] = P[F^{-1}(U) <= y] = P[U <= F(y)] = F(y).
# If F is a bijection, we also have Z = F(X) is Uniform.
#
# Make an exponential with large mean (= 100). This ensures we will get
# quantized values over a large range. This large range allows us to
# pretend that the cdf F is a bijection, and hence F(X) is uniform.
# Note that F cannot be bijection since it is constant between the
# integers. Hence, F(X) (see below) will not be uniform exactly.
with self.test_session():
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Exponential,
lam=0.01)
# X ~ QuantizedExponential
x = qdist.sample_n(n=10000, seed=42)
# Z = F(X), should be Uniform.
z = qdist.cdf(x)
# Compare the CDF of Z to that of a Uniform.
# dist = maximum distance between P[Z <= a] and P[U <= a].
      # We ignore pvalue, since of course this distribution is not exactly uniform, and
# with so many sample points we would get a false fail.
dist, _ = stats.kstest(z.eval(), "uniform")
# Since the distribution take values (approximately) in [0, 100], the
# cdf should have jumps (approximately) every 1/100 of the way up.
# Assert that the jumps are not more than 2/100.
self.assertLess(dist, 0.02)
def test_samples_agree_with_pdf_for_samples_over_small_range(self):
# Testing that samples and pdf agree for a small range is important because
# it makes sure the bin edges are consistent.
# Make an exponential with mean 5.
with self.test_session():
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Exponential,
lam=0.2)
# Standard error should be less than 1 / (2 * sqrt(n_samples))
n_samples = 10000
std_err_bound = 1 / (2 * np.sqrt(n_samples))
samps = qdist.sample((n_samples,), seed=42).eval()
# The smallest value the samples can take on is 1, which corresponds to
# the interval (0, 1]. Recall we use ceiling in the sampling definition.
self.assertLess(0.5, samps.min())
x_vals = np.arange(1, 11).astype(np.float32)
pmf_vals = qdist.pmf(x_vals).eval()
for ii in range(10):
self.assertAllClose(
pmf_vals[ii],
(samps == x_vals[ii]).mean(),
atol=std_err_bound)
def test_normal_cdf_and_survival_function(self):
# At integer values, the result should be the same as the standard normal.
batch_shape = (3, 3)
mu = self._rng.randn(*batch_shape)
sigma = self._rng.rand(*batch_shape) + 1.0
with self.test_session():
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Normal,
mu=mu,
sigma=sigma)
sp_normal = stats.norm(mu, sigma)
x = self._rng.randint(-5, 5, size=batch_shape).astype(np.float64)
self.assertAllClose(
sp_normal.cdf(x),
qdist.cdf(x).eval())
self.assertAllClose(
sp_normal.sf(x),
qdist.survival_function(x).eval())
def test_normal_log_cdf_and_log_survival_function(self):
# At integer values, the result should be the same as the standard normal.
batch_shape = (3, 3)
mu = self._rng.randn(*batch_shape)
sigma = self._rng.rand(*batch_shape) + 1.0
with self.test_session():
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Normal,
mu=mu,
sigma=sigma)
sp_normal = stats.norm(mu, sigma)
x = self._rng.randint(-10, 10, size=batch_shape).astype(np.float64)
self.assertAllClose(
sp_normal.logcdf(x),
qdist.log_cdf(x).eval())
self.assertAllClose(
sp_normal.logsf(x),
qdist.log_survival_function(x).eval())
def test_normal_prob_with_cutoffs(self):
# At integer values, the result should be the same as the standard normal.
with self.test_session():
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Normal,
mu=0.,
sigma=1.,
lower_cutoff=-2.,
upper_cutoff=2.)
sm_normal = stats.norm(0., 1.)
# These cutoffs create partitions of the real line, and indices:
# (-inf, -2](-2, -1](-1, 0](0, 1](1, inf)
# -2 -1 0 1 2
# Test interval (-inf, -2], <--> index -2.
self.assertAllClose(
sm_normal.cdf(-2),
qdist.prob(-2.).eval(),
atol=0)
# Test interval (-2, -1], <--> index -1.
self.assertAllClose(
sm_normal.cdf(-1) - sm_normal.cdf(-2),
qdist.prob(-1.).eval(),
atol=0)
# Test interval (-1, 0], <--> index 0.
self.assertAllClose(
sm_normal.cdf(0) - sm_normal.cdf(-1),
qdist.prob(0.).eval(),
atol=0)
# Test interval (1, inf), <--> index 2.
self.assertAllClose(
1. - sm_normal.cdf(1),
qdist.prob(2.).eval(),
atol=0)
def test_normal_log_prob_with_cutoffs(self):
# At integer values, the result should be the same as the standard normal.
with self.test_session():
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Normal,
mu=0.,
sigma=1.,
lower_cutoff=-2.,
upper_cutoff=2.)
sm_normal = stats.norm(0., 1.)
# These cutoffs create partitions of the real line, and indices:
# (-inf, -2](-2, -1](-1, 0](0, 1](1, inf)
# -2 -1 0 1 2
# Test interval (-inf, -2], <--> index -2.
self.assertAllClose(
np.log(sm_normal.cdf(-2)),
qdist.log_prob(-2.).eval(),
atol=0)
# Test interval (-2, -1], <--> index -1.
self.assertAllClose(
np.log(sm_normal.cdf(-1) - sm_normal.cdf(-2)),
qdist.log_prob(-1.).eval(),
atol=0)
# Test interval (-1, 0], <--> index 0.
self.assertAllClose(
np.log(sm_normal.cdf(0) - sm_normal.cdf(-1)),
qdist.log_prob(0.).eval(),
atol=0)
# Test interval (1, inf), <--> index 2.
self.assertAllClose(
np.log(1. - sm_normal.cdf(1)),
qdist.log_prob(2.).eval(),
atol=0)
def test_log_prob_and_grad_gives_finite_results(self):
with self.test_session():
for dtype in [np.float32, np.float64]:
mu = tf.Variable(0., name="mu", dtype=dtype)
sigma = tf.Variable(1., name="sigma", dtype=dtype)
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Normal,
mu=mu,
sigma=sigma)
x = np.arange(-100, 100, 2).astype(dtype)
tf.initialize_all_variables().run()
proba = qdist.log_prob(x)
grads = tf.gradients(proba, [mu, sigma])
self._assert_all_finite(proba.eval())
self._assert_all_finite(grads[0].eval())
self._assert_all_finite(grads[1].eval())
def test_prob_and_grad_gives_finite_results_for_common_events(self):
with self.test_session():
mu = tf.Variable(0.0, name="mu")
sigma = tf.Variable(1.0, name="sigma")
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Normal,
mu=mu,
sigma=sigma)
x = tf.ceil(4 * self._rng.rand(100).astype(np.float32) - 2)
tf.initialize_all_variables().run()
proba = qdist.prob(x)
self._assert_all_finite(proba.eval())
grads = tf.gradients(proba, [mu, sigma])
self._assert_all_finite(grads[0].eval())
self._assert_all_finite(grads[1].eval())
def test_lower_cutoff_must_be_below_upper_cutoff_or_we_raise(self):
with self.test_session():
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Normal,
lower_cutoff=1., # not strictly less than upper_cutoff.
upper_cutoff=1.,
mu=0.,
sigma=1.,
validate_args=True)
self.assertTrue(qdist.validate_args) # Default is True.
with self.assertRaisesOpError("must be strictly less"):
qdist.sample().eval()
def test_cutoffs_must_be_integer_valued_if_validate_args_true(self):
with self.test_session():
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Normal,
lower_cutoff=1.5,
upper_cutoff=10.,
mu=0.,
sigma=1.,
validate_args=True)
self.assertTrue(qdist.validate_args) # Default is True.
with self.assertRaisesOpError("has non-integer components"):
qdist.sample().eval()
def test_cutoffs_can_be_float_valued_if_validate_args_false(self):
with self.test_session():
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Normal,
lower_cutoff=1.5,
upper_cutoff=10.11,
mu=0.,
sigma=1.,
validate_args=False)
self.assertFalse(qdist.validate_args) # Default is True.
# Should not raise
qdist.sample().eval()
def test_dtype_and_shape_inherited_from_base_dist(self):
batch_shape = (2, 3)
with self.test_session():
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Normal,
lower_cutoff=1.0,
upper_cutoff=10.0,
mu=tf.zeros(batch_shape),
sigma=tf.ones(batch_shape))
self.assertEqual(batch_shape, qdist.get_batch_shape())
self.assertAllEqual(batch_shape, qdist.batch_shape().eval())
self.assertEqual((), qdist.get_event_shape())
self.assertAllEqual((), qdist.event_shape().eval())
samps = qdist.sample_n(n=10)
self.assertEqual((10,) + batch_shape, samps.get_shape())
self.assertAllEqual((10,) + batch_shape, samps.eval().shape)
y = self._rng.randint(0, 5, size=batch_shape).astype(np.float32)
self.assertEqual(batch_shape, qdist.prob(y).get_shape())
self.assertEqual(batch_shape, qdist.prob(y).eval().shape)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
nitzmahone/ansible | test/units/parsing/yaml/test_objects.py | 42 | 5607 | # This file is part of Ansible
# -*- coding: utf-8 -*-
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2016, Adrian Likins <[email protected]>
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from ansible.errors import AnsibleError
from ansible.parsing import vault
from ansible.parsing.yaml.loader import AnsibleLoader
# module under test
from ansible.parsing.yaml import objects
from units.mock.yaml_helper import YamlTestUtils
from units.mock.vault_helper import TextVaultSecret
class TestAnsibleVaultUnicodeNoVault(unittest.TestCase, YamlTestUtils):
def test_empty_init(self):
self.assertRaises(TypeError, objects.AnsibleVaultEncryptedUnicode)
def test_empty_string_init(self):
seq = ''.encode('utf8')
self.assert_values(seq)
def test_empty_byte_string_init(self):
seq = b''
self.assert_values(seq)
def _assert_values(self, avu, seq):
self.assertIsInstance(avu, objects.AnsibleVaultEncryptedUnicode)
self.assertTrue(avu.vault is None)
# AnsibleVaultEncryptedUnicode without a vault should never == any string
self.assertNotEquals(avu, seq)
def assert_values(self, seq):
avu = objects.AnsibleVaultEncryptedUnicode(seq)
self._assert_values(avu, seq)
def test_single_char(self):
seq = 'a'.encode('utf8')
self.assert_values(seq)
def test_string(self):
seq = 'some letters'
self.assert_values(seq)
def test_byte_string(self):
seq = 'some letters'.encode('utf8')
self.assert_values(seq)
class TestAnsibleVaultEncryptedUnicode(unittest.TestCase, YamlTestUtils):
def setUp(self):
self.good_vault_password = "hunter42"
good_vault_secret = TextVaultSecret(self.good_vault_password)
self.good_vault_secrets = [('good_vault_password', good_vault_secret)]
self.good_vault = vault.VaultLib(self.good_vault_secrets)
# TODO: make this use two vault secret identities instead of two vaultSecrets
self.wrong_vault_password = 'not-hunter42'
wrong_vault_secret = TextVaultSecret(self.wrong_vault_password)
self.wrong_vault_secrets = [('wrong_vault_password', wrong_vault_secret)]
self.wrong_vault = vault.VaultLib(self.wrong_vault_secrets)
self.vault = self.good_vault
self.vault_secrets = self.good_vault_secrets
def _loader(self, stream):
return AnsibleLoader(stream, vault_secrets=self.vault_secrets)
def test_dump_load_cycle(self):
aveu = self._from_plaintext('the test string for TestAnsibleVaultEncryptedUnicode.test_dump_load_cycle')
self._dump_load_cycle(aveu)
def assert_values(self, avu, seq):
self.assertIsInstance(avu, objects.AnsibleVaultEncryptedUnicode)
self.assertEqual(avu, seq)
self.assertTrue(avu.vault is self.vault)
self.assertIsInstance(avu.vault, vault.VaultLib)
def _from_plaintext(self, seq):
id_secret = vault.match_encrypt_secret(self.good_vault_secrets)
return objects.AnsibleVaultEncryptedUnicode.from_plaintext(seq, vault=self.vault, secret=id_secret[1])
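# For example, _from_plaintext(u'some letters') returns an
# AnsibleVaultEncryptedUnicode whose ciphertext was produced by self.vault with
# the matched 'good_vault_password' secret, and which compares equal to
# u'some letters' once decrypted.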
def _from_ciphertext(self, ciphertext):
avu = objects.AnsibleVaultEncryptedUnicode(ciphertext)
avu.vault = self.vault
return avu
def test_empty_init(self):
self.assertRaises(TypeError, objects.AnsibleVaultEncryptedUnicode)
def test_empty_string_init_from_plaintext(self):
seq = ''
avu = self._from_plaintext(seq)
self.assert_values(avu, seq)
def test_empty_unicode_init_from_plaintext(self):
seq = u''
avu = self._from_plaintext(seq)
self.assert_values(avu, seq)
def test_string_from_plaintext(self):
seq = 'some letters'
avu = self._from_plaintext(seq)
self.assert_values(avu, seq)
def test_unicode_from_plaintext(self):
seq = u'some letters'
avu = self._from_plaintext(seq)
self.assert_values(avu, seq)
def test_unicode_from_plaintext_encode(self):
seq = u'some text here'
avu = self._from_plaintext(seq)
b_avu = avu.encode('utf-8', 'strict')
self.assertIsInstance(avu, objects.AnsibleVaultEncryptedUnicode)
self.assertEqual(b_avu, seq.encode('utf-8', 'strict'))
self.assertTrue(avu.vault is self.vault)
self.assertIsInstance(avu.vault, vault.VaultLib)
# TODO/FIXME: make sure bad password fails differently than 'thats not encrypted'
def test_empty_string_wrong_password(self):
seq = ''
self.vault = self.wrong_vault
avu = self._from_plaintext(seq)
def compare(avu, seq):
return avu == seq
self.assertRaises(AnsibleError, compare, avu, seq)
def test_vaulted_utf8_value_37258(self):
seq = u"aöffü"
avu = self._from_plaintext(seq)
self.assert_values(avu, seq)
| gpl-3.0 |
jss-emr/openerp-7-src | openerp/addons/sale_margin/sale_margin.py | 30 | 4116 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty=qty,
uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
if not pricelist:
return res
frm_cur = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id
to_cur = self.pool.get('product.pricelist').browse(cr, uid, [pricelist])[0].currency_id.id
if product:
purchase_price = self.pool.get('product.product').browse(cr, uid, product).standard_price
price = self.pool.get('res.currency').compute(cr, uid, frm_cur, to_cur, purchase_price, round=False)
res['value'].update({'purchase_price': price})
return res
def _product_margin(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = 0
if line.product_id:
if line.purchase_price:
res[line.id] = round((line.price_unit*line.product_uos_qty*(100.0-line.discount)/100.0) -(line.purchase_price*line.product_uos_qty), 2)
else:
res[line.id] = round((line.price_unit*line.product_uos_qty*(100.0-line.discount)/100.0) -(line.product_id.standard_price*line.product_uos_qty), 2)
return res
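# Worked example for the margin computed above (hypothetical values):
# price_unit=100.0, product_uos_qty=2, discount=10.0 and purchase_price=60.0
# give margin = round(100.0 * 2 * 0.9 - 60.0 * 2, 2) = 60.0.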
_columns = {
'margin': fields.function(_product_margin, string='Margin',
store = True),
'purchase_price': fields.float('Cost Price', digits=(16,2))
}
sale_order_line()
class sale_order(osv.osv):
_inherit = "sale.order"
def _product_margin(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for sale in self.browse(cr, uid, ids, context=context):
result[sale.id] = 0.0
for line in sale.order_line:
result[sale.id] += line.margin or 0.0
return result
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
_columns = {
'margin': fields.function(_product_margin, string='Margin', help="It gives profitability by calculating the difference between the Unit Price and the cost price.", store={
'sale.order.line': (_get_order, ['margin'], 20),
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 20),
}),
}
sale_order()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
letsencrypt/letsencrypt | certbot-apache/certbot_apache/configurator.py | 1 | 99590 | """Apache Configuration based off of Augeas Configurator."""
# pylint: disable=too-many-lines
import copy
import fnmatch
import logging
import os
import pkg_resources
import re
import six
import socket
import time
import zope.component
import zope.interface
from acme import challenges
from acme.magic_typing import Any, DefaultDict, Dict, List, Set, Union # pylint: disable=unused-import, no-name-in-module
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot.achallenges import KeyAuthorizationAnnotatedChallenge # pylint: disable=unused-import
from certbot.plugins import common
from certbot.plugins.util import path_surgery
from certbot.plugins.enhancements import AutoHSTSEnhancement
from certbot_apache import apache_util
from certbot_apache import augeas_configurator
from certbot_apache import constants
from certbot_apache import display_ops
from certbot_apache import http_01
from certbot_apache import obj
from certbot_apache import parser
from collections import defaultdict
logger = logging.getLogger(__name__)
# TODO: Augeas sections ie. <VirtualHost>, <IfModule> beginning and closing
# tags need to be the same case, otherwise Augeas doesn't recognize them.
# This is not able to be completely remedied by regular expressions because
# Augeas views <VirtualHost> </Virtualhost> as an error. This will just
# require another check_parsing_errors() after all files are included...
# (after a find_directive search is executed currently). It can be a one
# time check however because all of LE's transactions will ensure
# only properly formed sections are added.
# Note: This protocol works for filenames with spaces in it, the sites are
# properly set up and directives are changed appropriately, but Apache won't
# recognize names in sites-enabled that have spaces. These are not added to the
# Apache configuration. It may be wise to warn the user if they are trying
# to use vhost filenames that contain spaces and offer to change ' ' to '_'
# Note: FILEPATHS and changes to files are transactional. They are copied
# over before the updates are made to the existing files. NEW_FILES is
# transactional due to the use of register_file_creation()
# TODO: Verify permissions on configuration root... it is easier than
# checking permissions on each of the relative directories and less error
# prone.
# TODO: Write a server protocol finder. Listen <port> <protocol> or
# Protocol <protocol>. This can verify partial setups are correct
# TODO: Add directives to sites-enabled... not sites-available.
# sites-available doesn't allow immediate find_dir search even with save()
# and load()
@zope.interface.implementer(interfaces.IAuthenticator, interfaces.IInstaller)
@zope.interface.provider(interfaces.IPluginFactory)
class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
# pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Apache configurator.
State of Configurator: This code has been tested and built for Ubuntu
14.04 with Apache 2.4, and it also works on Ubuntu 12.04 with Apache 2.2.
:ivar config: Configuration.
:type config: :class:`~certbot.interfaces.IConfig`
:ivar parser: Handles low level parsing
:type parser: :class:`~certbot_apache.parser`
:ivar tup version: version of Apache
:ivar list vhosts: All vhosts found in the configuration
(:class:`list` of :class:`~certbot_apache.obj.VirtualHost`)
:ivar dict assoc: Mapping between domains and vhosts
"""
description = "Apache Web Server plugin"
if os.environ.get("CERTBOT_DOCS") == "1":
description += ( # pragma: no cover
" (Please note that the default values of the Apache plugin options"
" change depending on the operating system Certbot is run on.)"
)
OS_DEFAULTS = dict(
server_root="/etc/apache2",
vhost_root="/etc/apache2/sites-available",
vhost_files="*",
logs_root="/var/log/apache2",
ctl="apache2ctl",
version_cmd=['apache2ctl', '-v'],
restart_cmd=['apache2ctl', 'graceful'],
conftest_cmd=['apache2ctl', 'configtest'],
enmod=None,
dismod=None,
le_vhost_ext="-le-ssl.conf",
handle_modules=False,
handle_sites=False,
challenge_location="/etc/apache2",
MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
"certbot_apache", "options-ssl-apache.conf")
)
def option(self, key):
"""Get a value from options"""
return self.options.get(key)
def _prepare_options(self):
"""
Set the values possibly changed by command line parameters to
OS_DEFAULTS constant dictionary
"""
opts = ["enmod", "dismod", "le_vhost_ext", "server_root", "vhost_root",
"logs_root", "challenge_location", "handle_modules", "handle_sites",
"ctl"]
for o in opts:
# Config options use dashes instead of underscores
if self.conf(o.replace("_", "-")) is not None:
self.options[o] = self.conf(o.replace("_", "-"))
else:
self.options[o] = self.OS_DEFAULTS[o]
# Special cases
self.options["version_cmd"][0] = self.option("ctl")
self.options["restart_cmd"][0] = self.option("ctl")
self.options["conftest_cmd"][0] = self.option("ctl")
@classmethod
def add_parser_arguments(cls, add):
# When adding, modifying or deleting command line arguments, be sure to
# include the changes in the list used in method _prepare_options() to
# ensure consistent behavior.
# Respect CERTBOT_DOCS environment variable and use default values from
# base class regardless of the underlying distribution (overrides).
if os.environ.get("CERTBOT_DOCS") == "1":
DEFAULTS = ApacheConfigurator.OS_DEFAULTS
else:
# cls.OS_DEFAULTS can be distribution specific, see override classes
DEFAULTS = cls.OS_DEFAULTS
add("enmod", default=DEFAULTS["enmod"],
help="Path to the Apache 'a2enmod' binary")
add("dismod", default=DEFAULTS["dismod"],
help="Path to the Apache 'a2dismod' binary")
add("le-vhost-ext", default=DEFAULTS["le_vhost_ext"],
help="SSL vhost configuration extension")
add("server-root", default=DEFAULTS["server_root"],
help="Apache server root directory")
add("vhost-root", default=None,
help="Apache server VirtualHost configuration root")
add("logs-root", default=DEFAULTS["logs_root"],
help="Apache server logs directory")
add("challenge-location",
default=DEFAULTS["challenge_location"],
help="Directory path for challenge configuration")
add("handle-modules", default=DEFAULTS["handle_modules"],
help="Let installer handle enabling required modules for you " +
"(Only Ubuntu/Debian currently)")
add("handle-sites", default=DEFAULTS["handle_sites"],
help="Let installer handle enabling sites for you " +
"(Only Ubuntu/Debian currently)")
add("ctl", default=DEFAULTS["ctl"],
help="Full path to Apache control script")
util.add_deprecated_argument(
add, argument_name="init-script", nargs=1)
def __init__(self, *args, **kwargs):
"""Initialize an Apache Configurator.
:param tup version: version of Apache as a tuple (2, 4, 7)
(used mostly for unittesting)
"""
version = kwargs.pop("version", None)
super(ApacheConfigurator, self).__init__(*args, **kwargs)
# Add name_server association dict
self.assoc = dict() # type: Dict[str, obj.VirtualHost]
# Outstanding challenges
self._chall_out = set() # type: Set[KeyAuthorizationAnnotatedChallenge]
# List of vhosts configured per wildcard domain on this run.
# used by deploy_cert() and enhance()
self._wildcard_vhosts = dict() # type: Dict[str, List[obj.VirtualHost]]
# Maps enhancements to vhosts we've enabled the enhancement for
self._enhanced_vhosts = defaultdict(set) # type: DefaultDict[str, Set[obj.VirtualHost]]
# Temporary state for AutoHSTS enhancement
self._autohsts = {} # type: Dict[str, Dict[str, Union[int, float]]]
# These will be set in the prepare function
self._prepared = False
self.parser = None
self.version = version
self.vhosts = None
self.options = copy.deepcopy(self.OS_DEFAULTS)
self._enhance_func = {"redirect": self._enable_redirect,
"ensure-http-header": self._set_http_header,
"staple-ocsp": self._enable_ocsp_stapling}
@property
def mod_ssl_conf(self):
"""Full absolute path to SSL configuration file."""
return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST)
@property
def updated_mod_ssl_conf_digest(self):
"""Full absolute path to digest of updated SSL configuration file."""
return os.path.join(self.config.config_dir, constants.UPDATED_MOD_SSL_CONF_DIGEST)
def prepare(self):
"""Prepare the authenticator/installer.
:raises .errors.NoInstallationError: If Apache configs cannot be found
:raises .errors.MisconfigurationError: If Apache is misconfigured
:raises .errors.NotSupportedError: If Apache version is not supported
:raises .errors.PluginError: If there is any other error
"""
# Perform the actual Augeas initialization to be able to react
try:
self.init_augeas()
except ImportError:
raise errors.NoInstallationError("Problem in Augeas installation")
self._prepare_options()
# Verify Apache is installed
self._verify_exe_availability(self.option("ctl"))
# Make sure configuration is valid
self.config_test()
# Set Version
if self.version is None:
self.version = self.get_version()
logger.debug('Apache version is %s',
'.'.join(str(i) for i in self.version))
if self.version < (2, 2):
raise errors.NotSupportedError(
"Apache Version %s not supported.", str(self.version))
if not self._check_aug_version():
raise errors.NotSupportedError(
"Apache plugin support requires libaugeas0 and augeas-lenses "
"version 1.2.0 or higher, please make sure you have you have "
"those installed.")
self.parser = self.get_parser()
# Check for errors in parsing files with Augeas
self.check_parsing_errors("httpd.aug")
# Get all of the available vhosts
self.vhosts = self.get_virtual_hosts()
self.install_ssl_options_conf(self.mod_ssl_conf,
self.updated_mod_ssl_conf_digest)
# Prevent two Apache plugins from modifying a config at once
try:
util.lock_dir_until_exit(self.option("server_root"))
except (OSError, errors.LockError):
logger.debug("Encountered error:", exc_info=True)
raise errors.PluginError(
"Unable to lock %s", self.option("server_root"))
self._prepared = True
def _verify_exe_availability(self, exe):
"""Checks availability of Apache executable"""
if not util.exe_exists(exe):
if not path_surgery(exe):
raise errors.NoInstallationError(
'Cannot find Apache executable {0}'.format(exe))
def _check_aug_version(self):
""" Checks that we have recent enough version of libaugeas.
If augeas version is recent enough, it will support case insensitive
regexp matching"""
self.aug.set("/test/path/testing/arg", "aRgUMeNT")
try:
matches = self.aug.match(
"/test//*[self::arg=~regexp('argument', 'i')]")
except RuntimeError:
self.aug.remove("/test/path")
return False
self.aug.remove("/test/path")
return matches
def get_parser(self):
"""Initializes the ApacheParser"""
# If user provided vhost_root value in command line, use it
return parser.ApacheParser(
self.aug, self.option("server_root"), self.conf("vhost-root"),
self.version, configurator=self)
def _wildcard_domain(self, domain):
"""
Checks if domain is a wildcard domain
:param str domain: Domain to check
:returns: If the domain is wildcard domain
:rtype: bool
"""
if isinstance(domain, six.text_type):
wildcard_marker = u"*."
else:
wildcard_marker = b"*."
return domain.startswith(wildcard_marker)
def deploy_cert(self, domain, cert_path, key_path,
chain_path=None, fullchain_path=None):
"""Deploys certificate to specified virtual host.
Currently tries to find the last directives to deploy the certificate
in the VHost associated with the given domain. If it can't find the
directives, it searches the "included" confs. The function verifies
that it has located the three directives and finally modifies them
to point to the correct destination. After the certificate is
installed, the VirtualHost is enabled if it isn't already.
.. todo:: Might be nice to remove chain directive if none exists
This shouldn't happen within certbot though
:raises errors.PluginError: When unable to deploy certificate due to
a lack of directives
"""
vhosts = self.choose_vhosts(domain)
for vhost in vhosts:
self._deploy_cert(vhost, cert_path, key_path, chain_path, fullchain_path)
def choose_vhosts(self, domain, create_if_no_ssl=True):
"""
Finds VirtualHosts that can be used with the provided domain
:param str domain: Domain name to match VirtualHosts to
:param bool create_if_no_ssl: If a found VirtualHost doesn't have an HTTPS
counterpart, should one get created
:returns: List of VirtualHosts or None
:rtype: `list` of :class:`~certbot_apache.obj.VirtualHost`
"""
if self._wildcard_domain(domain):
if domain in self._wildcard_vhosts:
# Vhosts for a wildcard domain were already selected
return self._wildcard_vhosts[domain]
# Ask user which VHosts to support.
# Returned objects are guaranteed to be ssl vhosts
return self._choose_vhosts_wildcard(domain, create_if_no_ssl)
else:
return [self.choose_vhost(domain, create_if_no_ssl)]
def _vhosts_for_wildcard(self, domain):
"""
Get VHost objects for every VirtualHost that the user wants to handle
with the wildcard certificate.
"""
# Collect all vhosts that match the name
matched = set()
for vhost in self.vhosts:
for name in vhost.get_names():
if self._in_wildcard_scope(name, domain):
matched.add(vhost)
return list(matched)
def _in_wildcard_scope(self, name, domain):
"""
Helper method for _vhosts_for_wildcard() that makes sure that the domain
is in the scope of the wildcard domain.
eg. in scope: domain = *.wild.card, name = 1.wild.card
not in scope: domain = *.wild.card, name = 1.2.wild.card
"""
if len(name.split(".")) == len(domain.split(".")):
return fnmatch.fnmatch(name, domain)
def _choose_vhosts_wildcard(self, domain, create_ssl=True):
"""Prompts user to choose vhosts to install a wildcard certificate for"""
# Get all vhosts that are covered by the wildcard domain
vhosts = self._vhosts_for_wildcard(domain)
# Go through the vhosts, making sure that we cover all the names
# present, but preferring the SSL vhosts
filtered_vhosts = dict()
for vhost in vhosts:
for name in vhost.get_names():
if vhost.ssl:
# Always prefer SSL vhosts
filtered_vhosts[name] = vhost
elif name not in filtered_vhosts and create_ssl:
# Add if not in list previously
filtered_vhosts[name] = vhost
# Only unique VHost objects
dialog_input = set([vhost for vhost in filtered_vhosts.values()])
# Ask the user which of names to enable, expect list of names back
dialog_output = display_ops.select_vhost_multiple(list(dialog_input))
if not dialog_output:
logger.error(
"No vhost exists with servername or alias for domain %s. "
"No vhost was selected. Please specify ServerName or ServerAlias "
"in the Apache config.",
domain)
raise errors.PluginError("No vhost selected")
# Make sure we create SSL vhosts for the ones that are HTTP only
# if requested.
return_vhosts = list()
for vhost in dialog_output:
if not vhost.ssl:
return_vhosts.append(self.make_vhost_ssl(vhost))
else:
return_vhosts.append(vhost)
self._wildcard_vhosts[domain] = return_vhosts
return return_vhosts
def _deploy_cert(self, vhost, cert_path, key_path, chain_path, fullchain_path):
"""
Helper function for deploy_cert() that handles the actual deployment;
it exists because we might want to do multiple deployments per
domain originally passed to deploy_cert(). This is especially true
with wildcard certificates.
"""
# This is done first so that ssl module is enabled and cert_path,
# cert_key... can all be parsed appropriately
self.prepare_server_https("443")
# Add directives and remove duplicates
self._add_dummy_ssl_directives(vhost.path)
self._clean_vhost(vhost)
path = {"cert_path": self.parser.find_dir("SSLCertificateFile",
None, vhost.path),
"cert_key": self.parser.find_dir("SSLCertificateKeyFile",
None, vhost.path)}
# Only include if a certificate chain is specified
if chain_path is not None:
path["chain_path"] = self.parser.find_dir(
"SSLCertificateChainFile", None, vhost.path)
# Handle errors when certificate/key directives cannot be found
if not path["cert_path"]:
logger.warning(
"Cannot find an SSLCertificateFile directive in %s. "
"VirtualHost was not modified", vhost.path)
raise errors.PluginError(
"Unable to find an SSLCertificateFile directive")
elif not path["cert_key"]:
logger.warning(
"Cannot find an SSLCertificateKeyFile directive for "
"certificate in %s. VirtualHost was not modified", vhost.path)
raise errors.PluginError(
"Unable to find an SSLCertificateKeyFile directive for "
"certificate")
logger.info("Deploying Certificate to VirtualHost %s", vhost.filep)
if self.version < (2, 4, 8) or (chain_path and not fullchain_path):
# install SSLCertificateFile, SSLCertificateKeyFile,
# and SSLCertificateChainFile directives
set_cert_path = cert_path
self.aug.set(path["cert_path"][-1], cert_path)
self.aug.set(path["cert_key"][-1], key_path)
if chain_path is not None:
self.parser.add_dir(vhost.path,
"SSLCertificateChainFile", chain_path)
else:
raise errors.PluginError("--chain-path is required for your "
"version of Apache")
else:
if not fullchain_path:
raise errors.PluginError("Please provide the --fullchain-path "
"option pointing to your full chain file")
set_cert_path = fullchain_path
self.aug.set(path["cert_path"][-1], fullchain_path)
self.aug.set(path["cert_key"][-1], key_path)
# Enable the new vhost if needed
if not vhost.enabled:
self.enable_site(vhost)
# Save notes about the transaction that took place
self.save_notes += ("Changed vhost at %s with addresses of %s\n"
"\tSSLCertificateFile %s\n"
"\tSSLCertificateKeyFile %s\n" %
(vhost.filep,
", ".join(str(addr) for addr in vhost.addrs),
set_cert_path, key_path))
if chain_path is not None:
self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path
def choose_vhost(self, target_name, create_if_no_ssl=True):
"""Chooses a virtual host based on the given domain name.
If there is no clear virtual host to be selected, the user is prompted
with all available choices.
The returned vhost is guaranteed to have TLS enabled unless
create_if_no_ssl is set to False, in which case there is no such guarantee
and the result is not cached.
:param str target_name: domain name
:param bool create_if_no_ssl: If a found VirtualHost doesn't have an HTTPS
counterpart, should one get created
:returns: vhost associated with name
:rtype: :class:`~certbot_apache.obj.VirtualHost`
:raises .errors.PluginError: If no vhost is available or chosen
"""
# Allows for domain names to be associated with a virtual host
if target_name in self.assoc:
return self.assoc[target_name]
# Try to find a reasonable vhost
vhost = self._find_best_vhost(target_name)
if vhost is not None:
if not create_if_no_ssl:
return vhost
if not vhost.ssl:
vhost = self.make_vhost_ssl(vhost)
self._add_servername_alias(target_name, vhost)
self.assoc[target_name] = vhost
return vhost
# Negate create_if_no_ssl value to indicate if we want a SSL vhost
# to get created if a non-ssl vhost is selected.
return self._choose_vhost_from_list(target_name, temp=not create_if_no_ssl)
def _choose_vhost_from_list(self, target_name, temp=False):
# Select a vhost from a list
vhost = display_ops.select_vhost(target_name, self.vhosts)
if vhost is None:
logger.error(
"No vhost exists with servername or alias of %s. "
"No vhost was selected. Please specify ServerName or ServerAlias "
"in the Apache config.",
target_name)
raise errors.PluginError("No vhost selected")
elif temp:
return vhost
elif not vhost.ssl:
addrs = self._get_proposed_addrs(vhost, "443")
# TODO: Conflicts is too conservative
if not any(vhost.enabled and vhost.conflicts(addrs) for
vhost in self.vhosts):
vhost = self.make_vhost_ssl(vhost)
else:
logger.error(
"The selected vhost would conflict with other HTTPS "
"VirtualHosts within Apache. Please select another "
"vhost or add ServerNames to your configuration.")
raise errors.PluginError(
"VirtualHost not able to be selected.")
self._add_servername_alias(target_name, vhost)
self.assoc[target_name] = vhost
return vhost
def domain_in_names(self, names, target_name):
"""Checks if target domain is covered by one or more of the provided
names. The target name is matched by wildcard as well as exact match.
:param names: server aliases
:type names: `collections.Iterable` of `str`
:param str target_name: name to compare with wildcards
:returns: True if target_name is covered by a wildcard,
otherwise, False
:rtype: bool
"""
# use lowercase strings because fnmatch can be case sensitive
target_name = target_name.lower()
for name in names:
name = name.lower()
# fnmatch treats "[seq]" specially and [ or ] characters aren't
# valid in Apache but Apache doesn't error out if they are present
if "[" not in name and fnmatch.fnmatch(target_name, name):
return True
return False
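# For example, domain_in_names(["*.example.com"], "www.example.com") is True.
# Note that fnmatch's "*" also matches dots, so "a.b.example.com" would match
# "*.example.com" here as well; only names containing "[" are never matched.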
def find_best_http_vhost(self, target, filter_defaults, port="80"):
"""Returns non-HTTPS vhost objects found from the Apache config
:param str target: Domain name of the desired VirtualHost
:param bool filter_defaults: whether _default_ vhosts should be
included if it is the best match
:param str port: port number the vhost should be listening on
:returns: VirtualHost object that's the best match for target name
:rtype: `obj.VirtualHost` or None
"""
filtered_vhosts = []
for vhost in self.vhosts:
if any(a.is_wildcard() or a.get_port() == port for a in vhost.addrs) and not vhost.ssl:
filtered_vhosts.append(vhost)
return self._find_best_vhost(target, filtered_vhosts, filter_defaults)
def _find_best_vhost(self, target_name, vhosts=None, filter_defaults=True):
"""Finds the best vhost for a target_name.
This does not upgrade a vhost to HTTPS... it only finds the most
appropriate vhost for the given target_name.
:param str target_name: domain handled by the desired vhost
:param vhosts: vhosts to consider
:type vhosts: `collections.Iterable` of :class:`~certbot_apache.obj.VirtualHost`
:param bool filter_defaults: whether a vhost with a _default_
addr is acceptable
:returns: VHost or None
"""
# Points 6 - Servername SSL
# Points 5 - Wildcard SSL
# Points 4 - Address name with SSL
# Points 3 - Servername no SSL
# Points 2 - Wildcard no SSL
# Points 1 - Address name with no SSL
best_candidate = None
best_points = 0
if vhosts is None:
vhosts = self.vhosts
for vhost in vhosts:
if vhost.modmacro is True:
continue
names = vhost.get_names()
if target_name in names:
points = 3
elif self.domain_in_names(names, target_name):
points = 2
elif any(addr.get_addr() == target_name for addr in vhost.addrs):
points = 1
else:
# No points given if names can't be found.
# This gets hit but doesn't register
continue # pragma: no cover
if vhost.ssl:
points += 3
if points > best_points:
best_points = points
best_candidate = vhost
# No winners here... is there only one reasonable vhost?
if best_candidate is None:
if filter_defaults:
vhosts = self._non_default_vhosts(vhosts)
# remove mod_macro hosts from reasonable vhosts
reasonable_vhosts = [vh for vh
in vhosts if vh.modmacro is False]
if len(reasonable_vhosts) == 1:
best_candidate = reasonable_vhosts[0]
return best_candidate
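# Worked example of the scoring above: for target_name "example.com", an SSL
# vhost whose ServerName is "example.com" scores 3 + 3 = 6 and beats both a
# non-SSL vhost with a matching wildcard ServerAlias (2 points) and an SSL
# vhost that only lists the address "example.com" (1 + 3 = 4 points).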
def _non_default_vhosts(self, vhosts):
"""Return all non _default_ only vhosts."""
return [vh for vh in vhosts if not all(
addr.get_addr() == "_default_" for addr in vh.addrs
)]
def get_all_names(self):
"""Returns all names found in the Apache Configuration.
:returns: All ServerNames, ServerAliases, and reverse DNS entries for
virtual host addresses
:rtype: set
"""
all_names = set() # type: Set[str]
vhost_macro = []
for vhost in self.vhosts:
all_names.update(vhost.get_names())
if vhost.modmacro:
vhost_macro.append(vhost.filep)
for addr in vhost.addrs:
if common.hostname_regex.match(addr.get_addr()):
all_names.add(addr.get_addr())
else:
name = self.get_name_from_ip(addr)
if name:
all_names.add(name)
if len(vhost_macro) > 0:
zope.component.getUtility(interfaces.IDisplay).notification(
"Apache mod_macro seems to be in use in file(s):\n{0}"
"\n\nUnfortunately mod_macro is not yet supported".format(
"\n ".join(vhost_macro)), force_interactive=True)
return util.get_filtered_names(all_names)
def get_name_from_ip(self, addr): # pylint: disable=no-self-use
"""Returns a reverse dns name if available.
:param addr: IP Address
:type addr: ~.common.Addr
:returns: name or empty string if name cannot be determined
:rtype: str
"""
# If it isn't a private IP, do a reverse DNS lookup
if not common.private_ips_regex.match(addr.get_addr()):
try:
socket.inet_aton(addr.get_addr())
return socket.gethostbyaddr(addr.get_addr())[0]
except (socket.error, socket.herror, socket.timeout):
pass
return ""
def _get_vhost_names(self, path):
"""Helper method for getting the ServerName and
ServerAlias values from vhost in path
:param path: Path to read ServerName and ServerAliases from
:returns: Tuple including ServerName and `list` of ServerAlias strings
"""
servername_match = self.parser.find_dir(
"ServerName", None, start=path, exclude=False)
serveralias_match = self.parser.find_dir(
"ServerAlias", None, start=path, exclude=False)
serveraliases = []
for alias in serveralias_match:
serveralias = self.parser.get_arg(alias)
serveraliases.append(serveralias)
servername = None
if servername_match:
# Get last ServerName as each overwrites the previous
servername = self.parser.get_arg(servername_match[-1])
return (servername, serveraliases)
def _add_servernames(self, host):
"""Helper function for get_virtual_hosts().
:param host: In progress vhost whose names will be added
:type host: :class:`~certbot_apache.obj.VirtualHost`
"""
servername, serveraliases = self._get_vhost_names(host.path)
for alias in serveraliases:
if not host.modmacro:
host.aliases.add(alias)
if not host.modmacro:
host.name = servername
def _create_vhost(self, path):
"""Used by get_virtual_hosts to create vhost objects
:param str path: Augeas path to virtual host
:returns: newly created vhost
:rtype: :class:`~certbot_apache.obj.VirtualHost`
"""
addrs = set()
try:
args = self.aug.match(path + "/arg")
except RuntimeError:
logger.warning("Encountered a problem while parsing file: %s, skipping", path)
return None
for arg in args:
addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg)))
is_ssl = False
if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False):
is_ssl = True
# "SSLEngine on" might be set outside of <VirtualHost>
# Treat vhosts with port 443 as ssl vhosts
for addr in addrs:
if addr.get_port() == "443":
is_ssl = True
filename = apache_util.get_file_path(
self.aug.get("/augeas/files%s/path" % apache_util.get_file_path(path)))
if filename is None:
return None
macro = False
if "/macro/" in path.lower():
macro = True
vhost_enabled = self.parser.parsed_in_original(filename)
vhost = obj.VirtualHost(filename, path, addrs, is_ssl,
vhost_enabled, modmacro=macro)
self._add_servernames(vhost)
return vhost
def get_virtual_hosts(self):
"""Returns list of virtual hosts found in the Apache configuration.
:returns: List of :class:`~certbot_apache.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
# Search base config, and all included paths for VirtualHosts
file_paths = {} # type: Dict[str, str]
internal_paths = defaultdict(set) # type: DefaultDict[str, Set[str]]
vhs = []
# Make a list of parser paths because the parser_paths
# dictionary may be modified during the loop.
for vhost_path in list(self.parser.parser_paths):
paths = self.aug.match(
("/files%s//*[label()=~regexp('%s')]" %
(vhost_path, parser.case_i("VirtualHost"))))
paths = [path for path in paths if
"virtualhost" in os.path.basename(path).lower()]
for path in paths:
new_vhost = self._create_vhost(path)
if not new_vhost:
continue
internal_path = apache_util.get_internal_aug_path(new_vhost.path)
realpath = os.path.realpath(new_vhost.filep)
if realpath not in file_paths:
file_paths[realpath] = new_vhost.filep
internal_paths[realpath].add(internal_path)
vhs.append(new_vhost)
elif (realpath == new_vhost.filep and
realpath != file_paths[realpath]):
# Prefer "real" vhost paths instead of symlinked ones
# ex: sites-enabled/vh.conf -> sites-available/vh.conf
# remove old (most likely) symlinked one
new_vhs = []
for v in vhs:
if v.filep == file_paths[realpath]:
internal_paths[realpath].remove(
apache_util.get_internal_aug_path(v.path))
else:
new_vhs.append(v)
vhs = new_vhs
file_paths[realpath] = realpath
internal_paths[realpath].add(internal_path)
vhs.append(new_vhost)
elif internal_path not in internal_paths[realpath]:
internal_paths[realpath].add(internal_path)
vhs.append(new_vhost)
return vhs
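# For example, if "sites-enabled/vh.conf" is a symlink to
# "sites-available/vh.conf" and both are parsed, only one VirtualHost object is
# kept and its filep points at the resolved "sites-available" file.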
def is_name_vhost(self, target_addr):
"""Returns if vhost is a name based vhost
NameVirtualHost was deprecated in Apache 2.4 as all VirtualHosts are
now NameVirtualHosts. If version is earlier than 2.4, check if addr
has a NameVirtualHost directive in the Apache config
:param certbot_apache.obj.Addr target_addr: vhost address
:returns: Success
:rtype: bool
"""
# Mixed and matched wildcard NameVirtualHost with VirtualHost
# behavior is undefined. Make sure that an exact match exists
# search for NameVirtualHost directive for ip_addr
# note ip_addr can be FQDN although Apache does not recommend it
return (self.version >= (2, 4) or
self.parser.find_dir("NameVirtualHost", str(target_addr)))
def add_name_vhost(self, addr):
"""Adds NameVirtualHost directive for given address.
:param addr: Address that will be added as NameVirtualHost directive
:type addr: :class:`~certbot_apache.obj.Addr`
"""
loc = parser.get_aug_path(self.parser.loc["name"])
if addr.get_port() == "443":
path = self.parser.add_dir_to_ifmodssl(
loc, "NameVirtualHost", [str(addr)])
else:
path = self.parser.add_dir(loc, "NameVirtualHost", [str(addr)])
msg = ("Setting %s to be NameBasedVirtualHost\n"
"\tDirective added to %s\n" % (addr, path))
logger.debug(msg)
self.save_notes += msg
def prepare_server_https(self, port, temp=False):
"""Prepare the server for HTTPS.
Make sure that the ssl_module is loaded and that the server
is appropriately listening on port.
:param str port: Port to listen on
"""
self.prepare_https_modules(temp)
self.ensure_listen(port, https=True)
def ensure_listen(self, port, https=False):
"""Make sure that Apache is listening on the port. Checks if the
Listen statement for the port already exists, and adds it to the
configuration if necessary.
:param str port: Port number to check and add Listen for if not in
place already
:param bool https: If the port will be used for HTTPS
"""
# If HTTPS requested for nonstandard port, add service definition
if https and port != "443":
port_service = "%s %s" % (port, "https")
else:
port_service = port
# Check for Listen <port>
# Note: This could be made to also look for ip:443 combo
listens = [self.parser.get_arg(x).split()[0] for
x in self.parser.find_dir("Listen")]
# Listen already in place
if self._has_port_already(listens, port):
return
listen_dirs = set(listens)
if not listens:
listen_dirs.add(port_service)
for listen in listens:
# For any listen statement, check if the machine also listens on
# the given port. If not, add such a listen statement.
if len(listen.split(":")) == 1:
# Its listening to all interfaces
if port not in listen_dirs and port_service not in listen_dirs:
listen_dirs.add(port_service)
else:
# The Listen statement specifies an ip
_, ip = listen[::-1].split(":", 1)
ip = ip[::-1]
if "%s:%s" % (ip, port_service) not in listen_dirs and (
"%s:%s" % (ip, port_service) not in listen_dirs):
listen_dirs.add("%s:%s" % (ip, port_service))
if https:
self._add_listens_https(listen_dirs, listens, port)
else:
self._add_listens_http(listen_dirs, listens, port)
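# For example, with an existing "Listen 1.2.3.4:80" and port="443", the loop
# above recovers the ip by reversing the string and splitting once on ":" (so
# the port, not the address, is split off) and "1.2.3.4:443" gets added, while
# a bare "Listen 80" would instead lead to a plain "Listen 443" for all
# interfaces. The 1.2.3.4 address is only an illustration.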
def _add_listens_http(self, listens, listens_orig, port):
"""Helper method for ensure_listen to figure out which new
listen statements need adding for listening HTTP on port
:param set listens: Set of all needed Listen statements
:param list listens_orig: List of existing listen statements
:param string port: Port number we're adding
"""
new_listens = listens.difference(listens_orig)
if port in new_listens:
# We have wildcard, skip the rest
self.parser.add_dir(parser.get_aug_path(self.parser.loc["listen"]),
"Listen", port)
self.save_notes += "Added Listen %s directive to %s\n" % (
port, self.parser.loc["listen"])
else:
for listen in new_listens:
self.parser.add_dir(parser.get_aug_path(
self.parser.loc["listen"]), "Listen", listen.split(" "))
self.save_notes += ("Added Listen %s directive to "
"%s\n") % (listen,
self.parser.loc["listen"])
def _add_listens_https(self, listens, listens_orig, port):
"""Helper method for ensure_listen to figure out which new
listen statements need adding for listening HTTPS on port
:param set listens: Set of all needed Listen statements
:param list listens_orig: List of existing listen statements
:param string port: Port number we're adding
"""
# Add service definition for non-standard ports
if port != "443":
port_service = "%s %s" % (port, "https")
else:
port_service = port
new_listens = listens.difference(listens_orig)
if port in new_listens or port_service in new_listens:
# We have wildcard, skip the rest
self.parser.add_dir_to_ifmodssl(
parser.get_aug_path(self.parser.loc["listen"]),
"Listen", port_service.split(" "))
self.save_notes += "Added Listen %s directive to %s\n" % (
port_service, self.parser.loc["listen"])
else:
for listen in new_listens:
self.parser.add_dir_to_ifmodssl(
parser.get_aug_path(self.parser.loc["listen"]),
"Listen", listen.split(" "))
self.save_notes += ("Added Listen %s directive to "
"%s\n") % (listen,
self.parser.loc["listen"])
def _has_port_already(self, listens, port):
"""Helper method for prepare_server_https to find out if user
already has an active Listen statement for the port we need
:param list listens: List of listen variables
:param string port: Port in question
"""
if port in listens:
return True
# Check if Apache is already listening on a specific IP
for listen in listens:
if len(listen.split(":")) > 1:
# Ugly but takes care of protocol def, eg: 1.1.1.1:443 https
if listen.split(":")[-1].split(" ")[0] == port:
return True
def prepare_https_modules(self, temp):
"""Helper method for prepare_server_https, taking care of enabling
needed modules
:param boolean temp: If the change is temporary
"""
if self.option("handle_modules"):
if self.version >= (2, 4) and ("socache_shmcb_module" not in
self.parser.modules):
self.enable_mod("socache_shmcb", temp=temp)
if "ssl_module" not in self.parser.modules:
self.enable_mod("ssl", temp=temp)
def make_vhost_ssl(self, nonssl_vhost): # pylint: disable=too-many-locals
"""Makes an ssl_vhost version of a nonssl_vhost.
Duplicates vhost and adds default ssl options
New vhost will reside as (nonssl_vhost.path) +
``self.option("le_vhost_ext")``
.. note:: This function saves the configuration
:param nonssl_vhost: Valid VH that doesn't have SSLEngine on
:type nonssl_vhost: :class:`~certbot_apache.obj.VirtualHost`
:returns: SSL vhost
:rtype: :class:`~certbot_apache.obj.VirtualHost`
:raises .errors.PluginError: If more than one virtual host is in
the file or if plugin is unable to write/read vhost files.
"""
avail_fp = nonssl_vhost.filep
ssl_fp = self._get_ssl_vhost_path(avail_fp)
orig_matches = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
(self._escape(ssl_fp),
parser.case_i("VirtualHost")))
self._copy_create_ssl_vhost_skeleton(nonssl_vhost, ssl_fp)
# Reload augeas to take into account the new vhost
self.aug.load()
# Get Vhost augeas path for new vhost
new_matches = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
(self._escape(ssl_fp),
parser.case_i("VirtualHost")))
vh_p = self._get_new_vh_path(orig_matches, new_matches)
if not vh_p:
# The vhost was not found on the currently parsed paths
# Make Augeas aware of the new vhost
self.parser.parse_file(ssl_fp)
# Try to search again
new_matches = self.aug.match(
"/files%s//* [label()=~regexp('%s')]" %
(self._escape(ssl_fp),
parser.case_i("VirtualHost")))
vh_p = self._get_new_vh_path(orig_matches, new_matches)
if not vh_p:
raise errors.PluginError(
"Could not reverse map the HTTPS VirtualHost to the original")
# Update Addresses
self._update_ssl_vhosts_addrs(vh_p)
# Log actions and create save notes
logger.info("Created an SSL vhost at %s", ssl_fp)
self.save_notes += "Created ssl vhost at %s\n" % ssl_fp
self.save()
# We know the length is one because of the assertion above
# Create the Vhost object
ssl_vhost = self._create_vhost(vh_p)
ssl_vhost.ancestor = nonssl_vhost
self.vhosts.append(ssl_vhost)
# NOTE: Searches through Augeas seem to ruin changes to directives
# The configuration must also be saved before being searched
# for the new directives; For these reasons... this is tacked
# on after fully creating the new vhost
# Now check if addresses need to be added as NameBasedVhost addrs
# This is for compliance with versions of Apache < 2.4
self._add_name_vhost_if_necessary(ssl_vhost)
return ssl_vhost
def _get_new_vh_path(self, orig_matches, new_matches):
""" Helper method for make_vhost_ssl for matching augeas paths. Returns
VirtualHost path from new_matches that's not present in orig_matches.
Paths are normalized, because augeas leaves indices out for paths
with only a single directive with a similar key."""
orig_matches = [i.replace("[1]", "") for i in orig_matches]
for match in new_matches:
if match.replace("[1]", "") not in orig_matches:
# Return the unmodified path
return match
return None
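# Schematic example: before the copy the file may contain a single
# ".../VirtualHost" node with no index, while afterwards Augeas reports
# ".../VirtualHost[1]" and ".../VirtualHost[2]"; stripping "[1]" makes the old
# node compare equal to its indexed form, so only ".../VirtualHost[2]" is new.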
def _get_ssl_vhost_path(self, non_ssl_vh_fp):
""" Get a file path for SSL vhost, uses user defined path as priority,
but if the value is invalid or not defined, will fall back to non-ssl
vhost filepath.
:param str non_ssl_vh_fp: Filepath of non-SSL vhost
:returns: Filepath for SSL vhost
:rtype: str
"""
if self.conf("vhost-root") and os.path.exists(self.conf("vhost-root")):
fp = os.path.join(os.path.realpath(self.option("vhost_root")),
os.path.basename(non_ssl_vh_fp))
else:
# Use non-ssl filepath
fp = os.path.realpath(non_ssl_vh_fp)
if fp.endswith(".conf"):
return fp[:-(len(".conf"))] + self.option("le_vhost_ext")
else:
return fp + self.option("le_vhost_ext")
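# For example, with the default le_vhost_ext of "-le-ssl.conf",
# "/etc/apache2/sites-available/example.conf" maps to
# "/etc/apache2/sites-available/example-le-ssl.conf"; a file without a ".conf"
# suffix simply gets the extension appended.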
def _sift_rewrite_rule(self, line):
"""Decides whether a line should be copied to a SSL vhost.
A canonical example of when sifting a line is required:
When the http vhost contains a RewriteRule that unconditionally
redirects any request to the https version of the same site.
e.g:
RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [L,QSA,R=permanent]
Copying the above line to the ssl vhost would cause a
redirection loop.
:param str line: a line extracted from the http vhost.
:returns: True - don't copy line from http vhost to SSL vhost.
:rtype: bool
"""
if not line.lower().lstrip().startswith("rewriterule"):
return False
# According to: http://httpd.apache.org/docs/2.4/rewrite/flags.html
# The syntax of a RewriteRule is:
# RewriteRule pattern target [Flag1,Flag2,Flag3]
# i.e. target is required, so it must exist.
target = line.split()[2].strip()
# target may be surrounded with quotes
if target[0] in ("'", '"') and target[0] == target[-1]:
target = target[1:-1]
# Sift line if it redirects the request to a HTTPS site
return target.startswith("https://")
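# For example, 'RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [L,R=301]'
# is sifted because its target starts with "https://", while a rule such as
# 'RewriteRule ^/old$ /new [R]' is copied to the SSL vhost unchanged.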
def _copy_create_ssl_vhost_skeleton(self, vhost, ssl_fp):
"""Copies over existing Vhost with IfModule mod_ssl.c> skeleton.
:param obj.VirtualHost vhost: Original VirtualHost object
:param str ssl_fp: Full path where the new ssl_vhost will reside.
A new file is created on the filesystem.
"""
# First register the creation so that it is properly removed if
# configuration is rolled back
if os.path.exists(ssl_fp):
notes = "Appended new VirtualHost directive to file %s" % ssl_fp
files = set()
files.add(ssl_fp)
self.reverter.add_to_checkpoint(files, notes)
else:
self.reverter.register_file_creation(False, ssl_fp)
sift = False
try:
orig_contents = self._get_vhost_block(vhost)
ssl_vh_contents, sift = self._sift_rewrite_rules(orig_contents)
with open(ssl_fp, "a") as new_file:
new_file.write("<IfModule mod_ssl.c>\n")
new_file.write("\n".join(ssl_vh_contents))
# The content does not include the closing tag, so add it
new_file.write("</VirtualHost>\n")
new_file.write("</IfModule>\n")
# Add new file to augeas paths if we're supposed to handle
# activation (it's not included as default)
if not self.parser.parsed_in_current(ssl_fp):
self.parser.parse_file(ssl_fp)
except IOError:
logger.critical("Error writing/reading to file in make_vhost_ssl", exc_info=True)
raise errors.PluginError("Unable to write/read in make_vhost_ssl")
if sift:
reporter = zope.component.getUtility(interfaces.IReporter)
reporter.add_message(
"Some rewrite rules copied from {0} were disabled in the "
"vhost for your HTTPS site located at {1} because they have "
"the potential to create redirection loops.".format(
vhost.filep, ssl_fp), reporter.MEDIUM_PRIORITY)
self.aug.set("/augeas/files%s/mtime" % (self._escape(ssl_fp)), "0")
self.aug.set("/augeas/files%s/mtime" % (self._escape(vhost.filep)), "0")
def _sift_rewrite_rules(self, contents):
""" Helper function for _copy_create_ssl_vhost_skeleton to prepare the
new HTTPS VirtualHost contents. Currently disabling the rewrites """
result = []
sift = False
contents = iter(contents)
comment = ("# Some rewrite rules in this file were "
"disabled on your HTTPS site,\n"
"# because they have the potential to create "
"redirection loops.\n")
for line in contents:
A = line.lower().lstrip().startswith("rewritecond")
B = line.lower().lstrip().startswith("rewriterule")
if not (A or B):
result.append(line)
continue
# A RewriteRule that doesn't need filtering
if B and not self._sift_rewrite_rule(line):
result.append(line)
continue
# A RewriteRule that does need filtering
if B and self._sift_rewrite_rule(line):
if not sift:
result.append(comment)
sift = True
result.append("# " + line)
continue
# We save RewriteCond(s) and their corresponding
# RewriteRule in 'chunk'.
# We then decide whether we comment out the entire
# chunk based on its RewriteRule.
chunk = []
if A:
chunk.append(line)
line = next(contents)
# RewriteCond(s) must be followed by one RewriteRule
while not line.lower().lstrip().startswith("rewriterule"):
chunk.append(line)
line = next(contents)
# Now, current line must start with a RewriteRule
chunk.append(line)
if self._sift_rewrite_rule(line):
if not sift:
result.append(comment)
sift = True
result.append('\n'.join(
['# ' + l for l in chunk]))
continue
else:
result.append('\n'.join(chunk))
continue
return result, sift
def _get_vhost_block(self, vhost):
""" Helper method to get VirtualHost contents from the original file.
This is done with the help of the Augeas span, which returns the span start and
end positions
:returns: `list` of VirtualHost block content lines without closing tag
"""
try:
span_val = self.aug.span(vhost.path)
except ValueError:
logger.critical("Error while reading the VirtualHost %s from "
"file %s", vhost.name, vhost.filep, exc_info=True)
raise errors.PluginError("Unable to read VirtualHost from file")
span_filep = span_val[0]
span_start = span_val[5]
span_end = span_val[6]
with open(span_filep, 'r') as fh:
fh.seek(span_start)
vh_contents = fh.read(span_end-span_start).split("\n")
self._remove_closing_vhost_tag(vh_contents)
return vh_contents
def _remove_closing_vhost_tag(self, vh_contents):
"""Removes the closing VirtualHost tag if it exists.
This method modifies vh_contents directly to remove the closing
tag. If the closing vhost tag is found, everything on the line
after it is also removed. Whether or not this tag is included
in the result of span depends on the Augeas version.
:param list vh_contents: VirtualHost block contents to check
"""
for offset, line in enumerate(reversed(vh_contents)):
if line:
line_index = line.lower().find("</virtualhost>")
if line_index != -1:
content_index = len(vh_contents) - offset - 1
vh_contents[content_index] = line[:line_index]
break
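# For example, if the last non-empty line of the block is
# "SSLEngine on </VirtualHost>", it is trimmed in place to "SSLEngine on " so
# that the skeleton writer can append its own closing tag, regardless of
# whether the Augeas span included the original one.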
def _update_ssl_vhosts_addrs(self, vh_path):
"""Set the port of each address of the new SSL VirtualHost to 443 and return them."""
ssl_addrs = set()
ssl_addr_p = self.aug.match(vh_path + "/arg")
for addr in ssl_addr_p:
old_addr = obj.Addr.fromstring(
str(self.parser.get_arg(addr)))
ssl_addr = old_addr.get_addr_obj("443")
self.aug.set(addr, str(ssl_addr))
ssl_addrs.add(ssl_addr)
return ssl_addrs
def _clean_vhost(self, vhost):
# remove duplicated or conflicting ssl directives
self._deduplicate_directives(vhost.path,
["SSLCertificateFile",
"SSLCertificateKeyFile"])
# remove all problematic directives
self._remove_directives(vhost.path, ["SSLCertificateChainFile"])
def _deduplicate_directives(self, vh_path, directives):
for directive in directives:
while len(self.parser.find_dir(directive, None,
vh_path, False)) > 1:
directive_path = self.parser.find_dir(directive, None,
vh_path, False)
self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _remove_directives(self, vh_path, directives):
for directive in directives:
while len(self.parser.find_dir(directive, None,
vh_path, False)) > 0:
directive_path = self.parser.find_dir(directive, None,
vh_path, False)
self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _add_dummy_ssl_directives(self, vh_path):
"""Add placeholder certificate directives and the Include of the Certbot SSL options file."""
self.parser.add_dir(vh_path, "SSLCertificateFile",
"insert_cert_file_path")
self.parser.add_dir(vh_path, "SSLCertificateKeyFile",
"insert_key_file_path")
# Only include the TLS configuration if not already included
existing_inc = self.parser.find_dir("Include", self.mod_ssl_conf, vh_path)
if not existing_inc:
self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf)
def _add_servername_alias(self, target_name, vhost):
vh_path = vhost.path
sname, saliases = self._get_vhost_names(vh_path)
if target_name == sname or target_name in saliases:
return
if self._has_matching_wildcard(vh_path, target_name):
return
if not self.parser.find_dir("ServerName", None,
start=vh_path, exclude=False):
self.parser.add_dir(vh_path, "ServerName", target_name)
else:
self.parser.add_dir(vh_path, "ServerAlias", target_name)
self._add_servernames(vhost)
def _has_matching_wildcard(self, vh_path, target_name):
"""Is target_name already included in a wildcard in the vhost?
:param str vh_path: Augeas path to the vhost
:param str target_name: name to compare with wildcards
:returns: True if there is a wildcard covering target_name in
the vhost in vhost_path, otherwise, False
:rtype: bool
"""
matches = self.parser.find_dir(
"ServerAlias", start=vh_path, exclude=False)
aliases = (self.aug.get(match) for match in matches)
return self.domain_in_names(aliases, target_name)
def _add_name_vhost_if_necessary(self, vhost):
"""Add NameVirtualHost Directives if necessary for new vhost.
NameVirtualHosts was a directive in Apache < 2.4
https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost
:param vhost: New virtual host that was recently created.
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
"""
need_to_save = False
# See if the exact address appears in any other vhost
# Remember 1.1.1.1:* == 1.1.1.1 -> hence any()
for addr in vhost.addrs:
# In Apache 2.2, when a NameVirtualHost directive is not
# set, "*" and "_default_" will conflict when sharing a port
addrs = set((addr,))
if addr.get_addr() in ("*", "_default_"):
addrs.update(obj.Addr((a, addr.get_port(),))
for a in ("*", "_default_"))
for test_vh in self.vhosts:
if (vhost.filep != test_vh.filep and
any(test_addr in addrs for
test_addr in test_vh.addrs) and
not self.is_name_vhost(addr)):
self.add_name_vhost(addr)
logger.info("Enabling NameVirtualHosts on %s", addr)
need_to_save = True
break
if need_to_save:
self.save()
def find_vhost_by_id(self, id_str):
"""
Searches through VirtualHosts and tries to match the id in a comment
:param str id_str: Id string for matching
:returns: The matched VirtualHost or None
:rtype: :class:`~certbot_apache.obj.VirtualHost` or None
:raises .errors.PluginError: If no VirtualHost is found
"""
for vh in self.vhosts:
if self._find_vhost_id(vh) == id_str:
return vh
msg = "No VirtualHost with ID {} was found.".format(id_str)
logger.warning(msg)
raise errors.PluginError(msg)
def _find_vhost_id(self, vhost):
"""Tries to find the unique ID from the VirtualHost comments. This is
used for keeping track of VirtualHost directive over time.
:param vhost: Virtual host to add the id
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:returns: The unique ID or None
:rtype: str or None
"""
# Strip the {} off from the format string
search_comment = constants.MANAGED_COMMENT_ID.format("")
id_comment = self.parser.find_comments(search_comment, vhost.path)
if id_comment:
# Use the first value, multiple ones shouldn't exist
comment = self.parser.get_arg(id_comment[0])
return comment.split(" ")[-1]
return None
def add_vhost_id(self, vhost):
"""Adds an unique ID to the VirtualHost as a comment for mapping back
to it on later invocations, as the config file order might have changed.
If ID already exists, returns that instead.
:param vhost: Virtual host to add or find the id
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:returns: The unique ID for vhost
:rtype: str or None
"""
vh_id = self._find_vhost_id(vhost)
if vh_id:
return vh_id
id_string = apache_util.unique_id()
comment = constants.MANAGED_COMMENT_ID.format(id_string)
self.parser.add_comment(vhost.path, comment)
return id_string
def _escape(self, fp):
fp = fp.replace(",", "\\,")
fp = fp.replace("[", "\\[")
fp = fp.replace("]", "\\]")
fp = fp.replace("|", "\\|")
fp = fp.replace("=", "\\=")
fp = fp.replace("(", "\\(")
fp = fp.replace(")", "\\)")
fp = fp.replace("!", "\\!")
return fp
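        # Illustrative behaviour (example path invented for this comment):
        #   self._escape("/files/etc/apache2/sites[1]|test.conf")
        #   -> "/files/etc/apache2/sites\[1\]\|test.conf"
        # i.e. characters that are special in Augeas path expressions are
        # backslash-escaped before the path is handed to Augeas.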
######################################################################
# Enhancements
######################################################################
def supported_enhancements(self): # pylint: disable=no-self-use
"""Returns currently supported enhancements."""
return ["redirect", "ensure-http-header", "staple-ocsp"]
def enhance(self, domain, enhancement, options=None):
"""Enhance configuration.
:param str domain: domain to enhance
:param str enhancement: enhancement type defined in
:const:`~certbot.constants.ENHANCEMENTS`
:param options: options for the enhancement
See :const:`~certbot.constants.ENHANCEMENTS`
documentation for appropriate parameter.
:raises .errors.PluginError: If Enhancement is not supported, or if
there is any other problem with the enhancement.
"""
try:
func = self._enhance_func[enhancement]
except KeyError:
raise errors.PluginError(
"Unsupported enhancement: {0}".format(enhancement))
matched_vhosts = self.choose_vhosts(domain, create_if_no_ssl=False)
# We should be handling only SSL vhosts for enhancements
vhosts = [vhost for vhost in matched_vhosts if vhost.ssl]
if not vhosts:
msg_tmpl = ("Certbot was not able to find SSL VirtualHost for a "
"domain {0} for enabling enhancement \"{1}\". The requested "
"enhancement was not configured.")
msg_enhancement = enhancement
if options:
msg_enhancement += ": " + options
msg = msg_tmpl.format(domain, msg_enhancement)
logger.warning(msg)
raise errors.PluginError(msg)
try:
for vhost in vhosts:
func(vhost, options)
except errors.PluginError:
logger.warning("Failed %s for %s", enhancement, domain)
raise
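        # Hypothetical call sites (domain and options invented, not taken from
        # this file):
        #   self.enhance("example.com", "redirect")
        #   self.enhance("example.com", "ensure-http-header",
        #                "Strict-Transport-Security")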
def _autohsts_increase(self, vhost, id_str, nextstep):
"""Increase the AutoHSTS max-age value
:param vhost: Virtual host object to modify
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:param str id_str: The unique ID string of VirtualHost
:param int nextstep: Next AutoHSTS max-age value index
"""
nextstep_value = constants.AUTOHSTS_STEPS[nextstep]
self._autohsts_write(vhost, nextstep_value)
self._autohsts[id_str] = {"laststep": nextstep, "timestamp": time.time()}
def _autohsts_write(self, vhost, nextstep_value):
"""
Write the new HSTS max-age value to the VirtualHost file
"""
hsts_dirpath = None
header_path = self.parser.find_dir("Header", None, vhost.path)
if header_path:
pat = '(?:[ "]|^)(strict-transport-security)(?:[ "]|$)'
for match in header_path:
if re.search(pat, self.aug.get(match).lower()):
hsts_dirpath = match
if not hsts_dirpath:
err_msg = ("Certbot was unable to find the existing HSTS header "
"from the VirtualHost at path {0}.").format(vhost.filep)
raise errors.PluginError(err_msg)
# Prepare the HSTS header value
hsts_maxage = "\"max-age={0}\"".format(nextstep_value)
# Update the header
# Our match statement was for string strict-transport-security, but
# we need to update the value instead. The next index is for the value
hsts_dirpath = hsts_dirpath.replace("arg[3]", "arg[4]")
self.aug.set(hsts_dirpath, hsts_maxage)
note_msg = ("Increasing HSTS max-age value to {0} for VirtualHost "
"in {1}\n".format(nextstep_value, vhost.filep))
logger.debug(note_msg)
self.save_notes += note_msg
self.save(note_msg)
def _autohsts_fetch_state(self):
"""
Populates the AutoHSTS state from the pluginstorage
"""
try:
self._autohsts = self.storage.fetch("autohsts")
except KeyError:
self._autohsts = dict()
def _autohsts_save_state(self):
"""
Saves the state of AutoHSTS object to pluginstorage
"""
self.storage.put("autohsts", self._autohsts)
self.storage.save()
def _autohsts_vhost_in_lineage(self, vhost, lineage):
"""
Searches AutoHSTS managed VirtualHosts that belong to the lineage.
Matches the private key path.
"""
return bool(
self.parser.find_dir("SSLCertificateKeyFile",
lineage.key_path, vhost.path))
def _enable_ocsp_stapling(self, ssl_vhost, unused_options):
"""Enables OCSP Stapling
In OCSP, each client (e.g. browser) would have to query the
OCSP Responder to validate that the site certificate was not revoked.
        Enabling OCSP Stapling allows the web server to query the OCSP
        Responder itself and staple the response to the certificate it offers
        during the TLS handshake, i.e. clients do not have to query the OCSP
        Responder themselves.
OCSP Stapling enablement on Apache implicitly depends on
SSLCertificateChainFile being set by other code.
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`
:param unused_options: Not currently used
:type unused_options: Not Available
:returns: Success, general_vhost (HTTP vhost)
:rtype: (bool, :class:`~certbot_apache.obj.VirtualHost`)
"""
min_apache_ver = (2, 3, 3)
if self.get_version() < min_apache_ver:
raise errors.PluginError(
"Unable to set OCSP directives.\n"
"Apache version is below 2.3.3.")
if "socache_shmcb_module" not in self.parser.modules:
self.enable_mod("socache_shmcb")
# Check if there's an existing SSLUseStapling directive on.
use_stapling_aug_path = self.parser.find_dir("SSLUseStapling",
"on", start=ssl_vhost.path)
if not use_stapling_aug_path:
self.parser.add_dir(ssl_vhost.path, "SSLUseStapling", "on")
ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep))
# Check if there's an existing SSLStaplingCache directive.
stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache',
None, ssl_vhost_aug_path)
# We'll simply delete the directive, so that we'll have a
# consistent OCSP cache path.
if stapling_cache_aug_path:
self.aug.remove(
re.sub(r"/\w*$", "", stapling_cache_aug_path[0]))
self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path,
"SSLStaplingCache",
["shmcb:/var/run/apache2/stapling_cache(128000)"])
msg = "OCSP Stapling was enabled on SSL Vhost: %s.\n"%(
ssl_vhost.filep)
self.save_notes += msg
self.save()
logger.info(msg)
def _set_http_header(self, ssl_vhost, header_substring):
"""Enables header that is identified by header_substring on ssl_vhost.
If the header identified by header_substring is not already set,
a new Header directive is placed in ssl_vhost's configuration with
arguments from: constants.HTTP_HEADER[header_substring]
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`
:param header_substring: string that uniquely identifies a header.
            e.g. Strict-Transport-Security, Upgrade-Insecure-Requests.
        :type header_substring: str
:returns: Success, general_vhost (HTTP vhost)
:rtype: (bool, :class:`~certbot_apache.obj.VirtualHost`)
:raises .errors.PluginError: If no viable HTTP host can be created or
set with header header_substring.
"""
if "headers_module" not in self.parser.modules:
self.enable_mod("headers")
# Check if selected header is already set
self._verify_no_matching_http_header(ssl_vhost, header_substring)
# Add directives to server
self.parser.add_dir(ssl_vhost.path, "Header",
constants.HEADER_ARGS[header_substring])
self.save_notes += ("Adding %s header to ssl vhost in %s\n" %
(header_substring, ssl_vhost.filep))
self.save()
logger.info("Adding %s header to ssl vhost in %s", header_substring,
ssl_vhost.filep)
def _verify_no_matching_http_header(self, ssl_vhost, header_substring):
"""Checks to see if an there is an existing Header directive that
contains the string header_substring.
:param ssl_vhost: vhost to check
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:param header_substring: string that uniquely identifies a header.
            e.g. Strict-Transport-Security, Upgrade-Insecure-Requests.
        :type header_substring: str
:returns: boolean
:rtype: (bool)
        :raises errors.PluginEnhancementAlreadyPresent: When header
header_substring exists
"""
header_path = self.parser.find_dir("Header", None,
start=ssl_vhost.path)
if header_path:
# "Existing Header directive for virtualhost"
pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower())
for match in header_path:
if re.search(pat, self.aug.get(match).lower()):
raise errors.PluginEnhancementAlreadyPresent(
"Existing %s header" % (header_substring))
def _enable_redirect(self, ssl_vhost, unused_options):
"""Redirect all equivalent HTTP traffic to ssl_vhost.
.. todo:: This enhancement should be rewritten and will
unfortunately require lots of debugging by hand.
Adds Redirect directive to the port 80 equivalent of ssl_vhost
First the function attempts to find the vhost with equivalent
ip addresses that serves on non-ssl ports
The function then adds the directive
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`
:param unused_options: Not currently used
:type unused_options: Not Available
:raises .errors.PluginError: If no viable HTTP host can be created or
used for the redirect.
"""
if "rewrite_module" not in self.parser.modules:
self.enable_mod("rewrite")
general_vh = self._get_http_vhost(ssl_vhost)
if general_vh is None:
# Add virtual_server with redirect
logger.debug("Did not find http version of ssl virtual host "
"attempting to create")
redirect_addrs = self._get_proposed_addrs(ssl_vhost)
for vhost in self.vhosts:
if vhost.enabled and vhost.conflicts(redirect_addrs):
raise errors.PluginError(
"Unable to find corresponding HTTP vhost; "
"Unable to create one as intended addresses conflict; "
"Current configuration does not support automated "
"redirection")
self._create_redirect_vhost(ssl_vhost)
else:
if general_vh in self._enhanced_vhosts["redirect"]:
logger.debug("Already enabled redirect for this vhost")
return
# Check if Certbot redirection already exists
self._verify_no_certbot_redirect(general_vh)
# Note: if code flow gets here it means we didn't find the exact
# certbot RewriteRule config for redirection. Finding
# another RewriteRule is likely to be fine in most or all cases,
# but redirect loops are possible in very obscure cases; see #1620
# for reasoning.
if self._is_rewrite_exists(general_vh):
logger.warning("Added an HTTP->HTTPS rewrite in addition to "
"other RewriteRules; you may wish to check for "
"overall consistency.")
# Add directives to server
# Note: These are not immediately searchable in sites-enabled
# even with save() and load()
if not self._is_rewrite_engine_on(general_vh):
self.parser.add_dir(general_vh.path, "RewriteEngine", "on")
names = ssl_vhost.get_names()
for idx, name in enumerate(names):
args = ["%{SERVER_NAME}", "={0}".format(name), "[OR]"]
if idx == len(names) - 1:
args.pop()
self.parser.add_dir(general_vh.path, "RewriteCond", args)
self._set_https_redirection_rewrite_rule(general_vh)
self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" %
(general_vh.filep, ssl_vhost.filep))
self.save()
self._enhanced_vhosts["redirect"].add(general_vh)
logger.info("Redirecting vhost in %s to ssl vhost in %s",
general_vh.filep, ssl_vhost.filep)
def _set_https_redirection_rewrite_rule(self, vhost):
if self.get_version() >= (2, 3, 9):
self.parser.add_dir(vhost.path, "RewriteRule",
constants.REWRITE_HTTPS_ARGS_WITH_END)
else:
self.parser.add_dir(vhost.path, "RewriteRule",
constants.REWRITE_HTTPS_ARGS)
def _verify_no_certbot_redirect(self, vhost):
"""Checks to see if a redirect was already installed by certbot.
Checks to see if virtualhost already contains a rewrite rule that is
identical to Certbot's redirection rewrite rule.
        For graceful transition to new rewrite rules for HTTPS redirection we
delete certbot's old rewrite rules and set the new one instead.
:param vhost: vhost to check
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:raises errors.PluginEnhancementAlreadyPresent: When the exact
            certbot redirection RewriteRule exists in virtual host.
"""
rewrite_path = self.parser.find_dir(
"RewriteRule", None, start=vhost.path)
# There can be other RewriteRule directive lines in vhost config.
# rewrite_args_dict keys are directive ids and the corresponding value
# for each is a list of arguments to that directive.
rewrite_args_dict = defaultdict(list) # type: DefaultDict[str, List[str]]
pat = r'(.*directive\[\d+\]).*'
for match in rewrite_path:
m = re.match(pat, match)
if m:
dir_path = m.group(1)
rewrite_args_dict[dir_path].append(match)
if rewrite_args_dict:
redirect_args = [constants.REWRITE_HTTPS_ARGS,
constants.REWRITE_HTTPS_ARGS_WITH_END]
for dir_path, args_paths in rewrite_args_dict.items():
arg_vals = [self.aug.get(x) for x in args_paths]
# Search for past redirection rule, delete it, set the new one
if arg_vals in constants.OLD_REWRITE_HTTPS_ARGS:
self.aug.remove(dir_path)
self._set_https_redirection_rewrite_rule(vhost)
self.save()
raise errors.PluginEnhancementAlreadyPresent(
"Certbot has already enabled redirection")
if arg_vals in redirect_args:
raise errors.PluginEnhancementAlreadyPresent(
"Certbot has already enabled redirection")
def _is_rewrite_exists(self, vhost):
"""Checks if there exists a RewriteRule directive in vhost
:param vhost: vhost to check
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:returns: True if a RewriteRule directive exists.
:rtype: bool
"""
rewrite_path = self.parser.find_dir(
"RewriteRule", None, start=vhost.path)
return bool(rewrite_path)
def _is_rewrite_engine_on(self, vhost):
"""Checks if a RewriteEngine directive is on
:param vhost: vhost to check
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
"""
rewrite_engine_path_list = self.parser.find_dir("RewriteEngine", "on",
start=vhost.path)
if rewrite_engine_path_list:
for re_path in rewrite_engine_path_list:
# A RewriteEngine directive may also be included in per
# directory .htaccess files. We only care about the VirtualHost.
if 'virtualhost' in re_path.lower():
return self.parser.get_arg(re_path)
return False
def _create_redirect_vhost(self, ssl_vhost):
"""Creates an http_vhost specifically to redirect for the ssl_vhost.
:param ssl_vhost: ssl vhost
:type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost`
:returns: tuple of the form
(`success`, :class:`~certbot_apache.obj.VirtualHost`)
:rtype: tuple
"""
text = self._get_redirect_config_str(ssl_vhost)
redirect_filepath = self._write_out_redirect(ssl_vhost, text)
self.aug.load()
# Make a new vhost data structure and add it to the lists
new_vhost = self._create_vhost(parser.get_aug_path(self._escape(redirect_filepath)))
self.vhosts.append(new_vhost)
self._enhanced_vhosts["redirect"].add(new_vhost)
# Finally create documentation for the change
self.save_notes += ("Created a port 80 vhost, %s, for redirection to "
"ssl vhost %s\n" %
(new_vhost.filep, ssl_vhost.filep))
def _get_redirect_config_str(self, ssl_vhost):
# get servernames and serveraliases
serveralias = ""
servername = ""
if ssl_vhost.name is not None:
servername = "ServerName " + ssl_vhost.name
if ssl_vhost.aliases:
serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases)
rewrite_rule_args = [] # type: List[str]
if self.get_version() >= (2, 3, 9):
rewrite_rule_args = constants.REWRITE_HTTPS_ARGS_WITH_END
else:
rewrite_rule_args = constants.REWRITE_HTTPS_ARGS
return ("<VirtualHost %s>\n"
"%s \n"
"%s \n"
"ServerSignature Off\n"
"\n"
"RewriteEngine On\n"
"RewriteRule %s\n"
"\n"
"ErrorLog %s/redirect.error.log\n"
"LogLevel warn\n"
"</VirtualHost>\n"
% (" ".join(str(addr) for
addr in self._get_proposed_addrs(ssl_vhost)),
servername, serveralias,
" ".join(rewrite_rule_args),
self.option("logs_root")))
def _write_out_redirect(self, ssl_vhost, text):
# This is the default name
redirect_filename = "le-redirect.conf"
# See if a more appropriate name can be applied
if ssl_vhost.name is not None:
# make sure servername doesn't exceed filename length restriction
if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)):
redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name
redirect_filepath = os.path.join(self.option("vhost_root"),
redirect_filename)
# Register the new file that will be created
# Note: always register the creation before writing to ensure file will
# be removed in case of unexpected program exit
self.reverter.register_file_creation(False, redirect_filepath)
# Write out file
with open(redirect_filepath, "w") as redirect_file:
redirect_file.write(text)
# Add new include to configuration if it doesn't exist yet
if not self.parser.parsed_in_current(redirect_filepath):
self.parser.parse_file(redirect_filepath)
logger.info("Created redirect file: %s", redirect_filename)
return redirect_filepath
def _get_http_vhost(self, ssl_vhost):
"""Find appropriate HTTP vhost for ssl_vhost."""
# First candidate vhosts filter
if ssl_vhost.ancestor:
return ssl_vhost.ancestor
candidate_http_vhs = [
vhost for vhost in self.vhosts if not vhost.ssl
]
# Second filter - check addresses
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost):
return http_vh
# Third filter - if none with same names, return generic
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost, generic=True):
return http_vh
return None
def _get_proposed_addrs(self, vhost, port="80"):
"""Return all addrs of vhost with the port replaced with the specified.
        :param obj.VirtualHost vhost: Original Vhost
:param str port: Desired port for new addresses
:returns: `set` of :class:`~obj.Addr`
"""
redirects = set()
for addr in vhost.addrs:
redirects.add(addr.get_addr_obj(port))
return redirects
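        # Sketch of the intended mapping (addresses invented for this comment):
        # a vhost listening on "1.2.3.4:443" and "[::1]:443" would, with the
        # default port of "80", yield {"1.2.3.4:80", "[::1]:80"}.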
def enable_site(self, vhost):
"""Enables an available site, Apache reload required.
.. note:: Does not make sure that the site correctly works or that all
modules are enabled appropriately.
.. note:: The distribution specific override replaces functionality
of this method where available.
:param vhost: vhost to enable
:type vhost: :class:`~certbot_apache.obj.VirtualHost`
:raises .errors.NotSupportedError: If filesystem layout is not
supported.
"""
if vhost.enabled:
return
if not self.parser.parsed_in_original(vhost.filep):
# Add direct include to root conf
logger.info("Enabling site %s by adding Include to root configuration",
vhost.filep)
self.save_notes += "Enabled site %s\n" % vhost.filep
self.parser.add_include(self.parser.loc["default"], vhost.filep)
vhost.enabled = True
return
def enable_mod(self, mod_name, temp=False): # pylint: disable=unused-argument
"""Enables module in Apache.
Both enables and reloads Apache so module is active.
:param str mod_name: Name of the module to enable. (e.g. 'ssl')
:param bool temp: Whether or not this is a temporary action.
.. note:: The distribution specific override replaces functionality
of this method where available.
:raises .errors.MisconfigurationError: We cannot enable modules in
generic fashion.
"""
mod_message = ("Apache needs to have module \"{0}\" active for the " +
"requested installation options. Unfortunately Certbot is unable " +
"to install or enable it for you. Please install the module, and " +
"run Certbot again.")
raise errors.MisconfigurationError(mod_message.format(mod_name))
def restart(self):
"""Runs a config test and reloads the Apache server.
:raises .errors.MisconfigurationError: If either the config test
or reload fails.
"""
self.config_test()
self._reload()
def _reload(self):
"""Reloads the Apache server.
:raises .errors.MisconfigurationError: If reload fails
"""
try:
util.run_script(self.option("restart_cmd"))
except errors.SubprocessError as err:
logger.info("Unable to restart apache using %s",
self.option("restart_cmd"))
alt_restart = self.option("restart_cmd_alt")
if alt_restart:
logger.debug("Trying alternative restart command: %s",
alt_restart)
# There is an alternative restart command available
# This usually is "restart" verb while original is "graceful"
try:
util.run_script(self.option(
"restart_cmd_alt"))
return
except errors.SubprocessError as secerr:
error = str(secerr)
else:
error = str(err)
raise errors.MisconfigurationError(error)
def config_test(self): # pylint: disable=no-self-use
"""Check the configuration of Apache for errors.
:raises .errors.MisconfigurationError: If config_test fails
"""
try:
util.run_script(self.option("conftest_cmd"))
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
def get_version(self):
"""Return version of Apache Server.
Version is returned as tuple. (ie. 2.4.7 = (2, 4, 7))
:returns: version
:rtype: tuple
:raises .PluginError: if unable to find Apache version
"""
try:
stdout, _ = util.run_script(self.option("version_cmd"))
except errors.SubprocessError:
raise errors.PluginError(
"Unable to run %s -v" %
self.option("version_cmd"))
regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE)
matches = regex.findall(stdout)
if len(matches) != 1:
raise errors.PluginError("Unable to find Apache version")
return tuple([int(i) for i in matches[0].split(".")])
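        # For example (assumed httpd output): a version command printing
        # "Server version: Apache/2.4.7 (Ubuntu)" is matched by the regex above
        # and returned as (2, 4, 7).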
def more_info(self):
"""Human-readable string to help understand the module"""
return (
"Configures Apache to authenticate and install HTTPS.{0}"
"Server root: {root}{0}"
"Version: {version}".format(
os.linesep, root=self.parser.loc["root"],
version=".".join(str(i) for i in self.version))
)
###########################################################################
# Challenges Section
###########################################################################
def get_chall_pref(self, unused_domain): # pylint: disable=no-self-use
"""Return list of challenge preferences."""
return [challenges.HTTP01]
def perform(self, achalls):
"""Perform the configuration related challenge.
This function currently assumes all challenges will be fulfilled.
        If this turns out not to be the case in the future, cleanup and
outstanding challenges will have to be designed better.
"""
self._chall_out.update(achalls)
responses = [None] * len(achalls)
http_doer = http_01.ApacheHttp01(self)
for i, achall in enumerate(achalls):
# Currently also have chall_doer hold associated index of the
# challenge. This helps to put all of the responses back together
# when they are all complete.
http_doer.add_chall(achall, i)
http_response = http_doer.perform()
if http_response:
# Must reload in order to activate the challenges.
# Handled here because we may be able to load up other challenge
# types
self.restart()
# TODO: Remove this dirty hack. We need to determine a reliable way
# of identifying when the new configuration is being used.
time.sleep(3)
self._update_responses(responses, http_response, http_doer)
return responses
def _update_responses(self, responses, chall_response, chall_doer):
# Go through all of the challenges and assign them to the proper
# place in the responses return value. All responses must be in the
# same order as the original challenges.
for i, resp in enumerate(chall_response):
responses[chall_doer.indices[i]] = resp
def cleanup(self, achalls):
"""Revert all challenges."""
self._chall_out.difference_update(achalls)
# If all of the challenges have been finished, clean up everything
if not self._chall_out:
self.revert_challenge_config()
self.restart()
self.parser.reset_modules()
def install_ssl_options_conf(self, options_ssl, options_ssl_digest):
"""Copy Certbot's SSL options file into the system's config dir if required."""
# XXX if we ever try to enforce a local privilege boundary (eg, running
# certbot for unprivileged users via setuid), this function will need
# to be modified.
return common.install_version_controlled_file(options_ssl, options_ssl_digest,
self.option("MOD_SSL_CONF_SRC"), constants.ALL_SSL_OPTIONS_HASHES)
def enable_autohsts(self, _unused_lineage, domains):
"""
Enable the AutoHSTS enhancement for defined domains
:param _unused_lineage: Certificate lineage object, unused
:type _unused_lineage: certbot.storage.RenewableCert
:param domains: List of domains in certificate to enhance
        :type domains: `list` of `str`
"""
self._autohsts_fetch_state()
_enhanced_vhosts = []
for d in domains:
matched_vhosts = self.choose_vhosts(d, create_if_no_ssl=False)
# We should be handling only SSL vhosts for AutoHSTS
vhosts = [vhost for vhost in matched_vhosts if vhost.ssl]
if not vhosts:
msg_tmpl = ("Certbot was not able to find SSL VirtualHost for a "
"domain {0} for enabling AutoHSTS enhancement.")
msg = msg_tmpl.format(d)
logger.warning(msg)
raise errors.PluginError(msg)
for vh in vhosts:
try:
self._enable_autohsts_domain(vh)
_enhanced_vhosts.append(vh)
except errors.PluginEnhancementAlreadyPresent:
if vh in _enhanced_vhosts:
continue
msg = ("VirtualHost for domain {0} in file {1} has a " +
"String-Transport-Security header present, exiting.")
raise errors.PluginEnhancementAlreadyPresent(
msg.format(d, vh.filep))
if _enhanced_vhosts:
note_msg = "Enabling AutoHSTS"
self.save(note_msg)
logger.info(note_msg)
self.restart()
# Save the current state to pluginstorage
self._autohsts_save_state()
def _enable_autohsts_domain(self, ssl_vhost):
"""Do the initial AutoHSTS deployment to a vhost
:param ssl_vhost: The VirtualHost object to deploy the AutoHSTS
:type ssl_vhost: :class:`~certbot_apache.obj.VirtualHost` or None
:raises errors.PluginEnhancementAlreadyPresent: When already enhanced
"""
# This raises the exception
self._verify_no_matching_http_header(ssl_vhost,
"Strict-Transport-Security")
if "headers_module" not in self.parser.modules:
self.enable_mod("headers")
# Prepare the HSTS header value
hsts_header = constants.HEADER_ARGS["Strict-Transport-Security"][:-1]
initial_maxage = constants.AUTOHSTS_STEPS[0]
hsts_header.append("\"max-age={0}\"".format(initial_maxage))
# Add ID to the VirtualHost for mapping back to it later
uniq_id = self.add_vhost_id(ssl_vhost)
self.save_notes += "Adding unique ID {0} to VirtualHost in {1}\n".format(
uniq_id, ssl_vhost.filep)
# Add the actual HSTS header
self.parser.add_dir(ssl_vhost.path, "Header", hsts_header)
note_msg = ("Adding gradually increasing HSTS header with initial value "
"of {0} to VirtualHost in {1}\n".format(
initial_maxage, ssl_vhost.filep))
self.save_notes += note_msg
# Save the current state to pluginstorage
self._autohsts[uniq_id] = {"laststep": 0, "timestamp": time.time()}
def update_autohsts(self, _unused_domain):
"""
Increase the AutoHSTS values of VirtualHosts that the user has enabled
this enhancement for.
:param _unused_domain: Not currently used
:type _unused_domain: Not Available
"""
self._autohsts_fetch_state()
if not self._autohsts:
# No AutoHSTS enabled for any domain
return
curtime = time.time()
save_and_restart = False
for id_str, config in list(self._autohsts.items()):
if config["timestamp"] + constants.AUTOHSTS_FREQ > curtime:
# Skip if last increase was < AUTOHSTS_FREQ ago
continue
nextstep = config["laststep"] + 1
if nextstep < len(constants.AUTOHSTS_STEPS):
# If installer hasn't been prepared yet, do it now
if not self._prepared:
self.prepare()
# Have not reached the max value yet
try:
vhost = self.find_vhost_by_id(id_str)
except errors.PluginError:
msg = ("Could not find VirtualHost with ID {0}, disabling "
"AutoHSTS for this VirtualHost").format(id_str)
logger.warning(msg)
# Remove the orphaned AutoHSTS entry from pluginstorage
self._autohsts.pop(id_str)
continue
self._autohsts_increase(vhost, id_str, nextstep)
msg = ("Increasing HSTS max-age value for VirtualHost with id "
"{0}").format(id_str)
self.save_notes += msg
save_and_restart = True
if save_and_restart:
self.save("Increased HSTS max-age values")
self.restart()
self._autohsts_save_state()
def deploy_autohsts(self, lineage):
"""
Checks if autohsts vhost has reached maximum auto-increased value
and changes the HSTS max-age to a high value.
:param lineage: Certificate lineage object
:type lineage: certbot.storage.RenewableCert
"""
self._autohsts_fetch_state()
if not self._autohsts:
# No autohsts enabled for any vhost
return
vhosts = []
affected_ids = []
# Copy, as we are removing from the dict inside the loop
for id_str, config in list(self._autohsts.items()):
if config["laststep"]+1 >= len(constants.AUTOHSTS_STEPS):
# max value reached, try to make permanent
try:
vhost = self.find_vhost_by_id(id_str)
except errors.PluginError:
msg = ("VirtualHost with id {} was not found, unable to "
"make HSTS max-age permanent.").format(id_str)
logger.warning(msg)
self._autohsts.pop(id_str)
continue
if self._autohsts_vhost_in_lineage(vhost, lineage):
vhosts.append(vhost)
affected_ids.append(id_str)
save_and_restart = False
for vhost in vhosts:
self._autohsts_write(vhost, constants.AUTOHSTS_PERMANENT)
msg = ("Strict-Transport-Security max-age value for "
"VirtualHost in {0} was made permanent.").format(vhost.filep)
logger.debug(msg)
self.save_notes += msg+"\n"
save_and_restart = True
if save_and_restart:
self.save("Made HSTS max-age permanent")
self.restart()
for id_str in affected_ids:
self._autohsts.pop(id_str)
# Update AutoHSTS storage (We potentially removed vhosts from managed)
self._autohsts_save_state()
AutoHSTSEnhancement.register(ApacheConfigurator) # pylint: disable=no-member
| apache-2.0 |
papouso/odoo | addons/hr_payroll/report/report_contribution_register.py | 377 | 3380 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil import relativedelta
from openerp.osv import osv
from openerp.report import report_sxw
class contribution_register_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(contribution_register_report, self).__init__(cr, uid, name, context)
self.localcontext.update({
'get_payslip_lines': self._get_payslip_lines,
'sum_total': self.sum_total,
})
def set_context(self, objects, data, ids, report_type=None):
self.date_from = data['form'].get('date_from', time.strftime('%Y-%m-%d'))
self.date_to = data['form'].get('date_to', str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10])
return super(contribution_register_report, self).set_context(objects, data, ids, report_type=report_type)
def sum_total(self):
return self.regi_total
def _get_payslip_lines(self, obj):
payslip_line = self.pool.get('hr.payslip.line')
payslip_lines = []
res = []
self.regi_total = 0.0
self.cr.execute("SELECT pl.id from hr_payslip_line as pl "\
"LEFT JOIN hr_payslip AS hp on (pl.slip_id = hp.id) "\
"WHERE (hp.date_from >= %s) AND (hp.date_to <= %s) "\
"AND pl.register_id = %s "\
"AND hp.state = 'done' "\
"ORDER BY pl.slip_id, pl.sequence",
(self.date_from, self.date_to, obj.id))
payslip_lines = [x[0] for x in self.cr.fetchall()]
for line in payslip_line.browse(self.cr, self.uid, payslip_lines):
res.append({
'payslip_name': line.slip_id.name,
'name': line.name,
'code': line.code,
'quantity': line.quantity,
'amount': line.amount,
'total': line.total,
})
self.regi_total += line.total
return res
class wrapped_report_contribution_register(osv.AbstractModel):
_name = 'report.hr_payroll.report_contributionregister'
_inherit = 'report.abstract_report'
_template = 'hr_payroll.report_contributionregister'
_wrapped_report_class = contribution_register_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hzy001/ansible | lib/ansible/executor/stats.py | 251 | 1716 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class AggregateStats:
''' holds stats about per-host activity during playbook runs '''
def __init__(self):
self.processed = {}
self.failures = {}
self.ok = {}
self.dark = {}
self.changed = {}
self.skipped = {}
def increment(self, what, host):
''' helper function to bump a statistic '''
self.processed[host] = 1
prev = (getattr(self, what)).get(host, 0)
getattr(self, what)[host] = prev+1
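        # For example (hypothetical call): increment('ok', 'web01') marks
        # 'web01' as processed and bumps self.ok['web01'] by one.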
def summarize(self, host):
''' return information about a particular host '''
return dict(
ok = self.ok.get(host, 0),
failures = self.failures.get(host, 0),
unreachable = self.dark.get(host,0),
changed = self.changed.get(host, 0),
skipped = self.skipped.get(host, 0)
)
| gpl-3.0 |
thezimmee/os-zimmee | sublime/Plugins/Focus Active View/focus-active-view.py | 1 | 3934 | import os
from shutil import copyfile
import sublime
import sublime_plugin
# Returns color scheme path for where the unfocused scheme should be.
def get_unfocused_scheme_path(view):
main_scheme = view.settings().get("color_scheme")
return os.path.join(sublime.packages_path(), "User/UnfocusedThemes", os.path.dirname(main_scheme), "Unfocused - " + os.path.basename(main_scheme))
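# Illustrative result (scheme path invented for this comment): for a view whose
# color_scheme is "Packages/Monokai/Monokai.tmTheme", this returns
# "<packages_path>/User/UnfocusedThemes/Packages/Monokai/Unfocused - Monokai.tmTheme".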
# Returns whether this plugin is disabled.
def is_disabled():
return not sublime.load_settings("Preferences.sublime-settings").get("focus_active_view")
# Returns true if `view` is a widget, not a file.
def is_widget(view):
return view.settings().get("is_widget")
# Focuses a view.
def focus(view):
# Check if plugin should run on this view.
if is_disabled() or is_widget(view):
return
# Check that this view is not already focused.
view_settings = view.settings()
if view_settings.get("is_focused", False):
return
# Focus this view.
sublime_settings = sublime.load_settings("Preferences.sublime-settings")
scheme = sublime_settings.get("color_scheme")
view_settings.set("is_focused", True)
view_settings.set("color_scheme", scheme)
# Unfocus other views, if not already unfocused.
for window in sublime.windows():
for other_view in window.views():
other_view_settings = other_view.settings()
if other_view.id() != view.id() and not is_widget(other_view) and other_view_settings.get("is_focused"):
unfocus(other_view)
# Unfocuses a view.
def unfocus(view):
view_settings = view.settings()
unfocused_scheme = get_unfocused_scheme_path(view)
if os.path.exists(unfocused_scheme):
view_settings.set("is_focused", False)
view_settings.set("color_scheme", unfocused_scheme)
# Creates an unfocused color scheme based on the currently active scheme.
def create_unfocused_color_scheme():
sublime_settings = sublime.load_settings("Preferences.sublime-settings")
original_scheme = sublime_settings.get("color_scheme")
active_view = sublime.active_window().active_view()
# Get active scheme.
current_scheme = active_view.settings().get("color_scheme")
# Method to restore the current color scheme.
def restore_original_scheme():
sublime_settings.set("color_scheme", original_scheme)
# Tweak current scheme with ThemeTweaker.
sublime_settings.set("color_scheme", current_scheme)
new_scheme = get_unfocused_scheme_path(active_view)
tweaker_settings = sublime_settings.get("unfocused_pane_tweaker")
if tweaker_settings is None:
tweaker_settings = "brightness(0.97);saturation(0.85);"
sublime.run_command("theme_tweaker_custom", {"filters": tweaker_settings})
try:
# Copy the tweaked scheme to its final destination.
if not os.path.exists(os.path.normpath(os.path.dirname(new_scheme))):
os.makedirs(os.path.dirname(os.path.normpath(new_scheme)))
copyfile(os.path.normpath(os.path.join(sublime.packages_path(), "User/ThemeTweaker/tweak-" + os.path.basename(current_scheme))), new_scheme)
sublime.set_timeout_async(restore_original_scheme, 1000)
# Return unfocused scheme path.
return new_scheme
except Exception as e:
restore_original_scheme()
print(e)
return
# Create / update an unfocused scheme for the currently active scheme.
class CreateUnfocusedColorSchemeCommand(sublime_plugin.WindowCommand):
def run(self):
create_unfocused_color_scheme()
# Command to toggle focusing active view.
class ToggleFocusActiveViewCommand(sublime_plugin.WindowCommand):
def run(self):
if is_disabled():
sublime.load_settings("Preferences.sublime-settings").set("focus_active_view", True)
focus(self.window.active_view())
else:
sublime.load_settings("Preferences.sublime-settings").set("focus_active_view", False)
for window in sublime.windows():
for view in window.views():
view.settings().erase("color_scheme")
# View listener.
class FocusActiveViewListener(sublime_plugin.EventListener):
# When view is activated...
def on_activated(self, view):
focus(view)
| mit |
bluecmd/hackrf | firmware/tools/make-dfu.py | 1 | 1819 | #!/usr/bin/env python2
# vim: set ts=4 sw=4 tw=0 et pm=:
import struct
import sys
import os.path
import getopt
import zlib
options, remainder = getopt.getopt(sys.argv[1:], 'p:v:d:S:v', ['pid=',
'vid=',
'did=',
'spec=',
'verbose',
])
pid = 0x000c
vid = 0x1fc9
did = 0
spec = 0x0100
verbose = False
for opt, arg in options:
if opt in ('-p', '--pid'):
pid = int(arg)
if opt in ('-v', '--vid'):
vid = int(arg)
if opt in ('-d', '--did'):
did = int(arg)
if opt in ('-S', '--spec'):
spec = int(arg)
elif opt in ('-v', '--verbose'):
verbose = True
if len(remainder)<1:
in_file = "/dev/stdin"
else:
in_file = remainder[0]
if len(remainder)<2:
out = open("/dev/stdout","wb")
else:
out = open(remainder[1],"wb")
# ref. NXP UM10503 Table 24 (Boot image header description)
header = ""
header += struct.pack ('<B',int("11"+"011010",2)) # AES enc not active + No hash active
header += struct.pack ('<B',int("11"+"111111",2)) # RESERVED + AES_CONTROL
size=os.path.getsize(in_file)
size=(size+511)/512 # 512 byte blocks, rounded up
header += struct.pack('<H',size) # (badly named) HASH_SIZE
header += struct.pack('8B',*[0xff] *8) # HASH_VALUE (unused)
header += struct.pack('4B',*[0xff] *4) # RESERVED
out.write( header )
infile=open(in_file,"rb").read()
out.write( infile )
suffix= ""
suffix+= struct.pack('<H', did) # bcdDevice
suffix+= struct.pack('<H', pid) # idProduct
suffix+= struct.pack('<H', vid) # idVendor
suffix+= struct.pack('<H', spec) # bcdDFU
suffix+= b'DFU'[::-1] # (reverse DFU)
suffix+= struct.pack('<B', 16) # suffix length
out.write( suffix )
checksum=zlib.crc32(header+infile+suffix) & 0xffffffff ^ 0xffffffff
out.write( struct.pack('I', checksum) ) # crc32
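# Example invocation (assumed; file names are placeholders):
#   python2 make-dfu.py hackrf_usb.bin hackrf_usb.dfu
# The output is the input image prefixed with the 16-byte NXP boot image header
# built above (see the UM10503 reference) and followed by the 16-byte DFU
# suffix: device/product/vendor IDs, DFU spec, reversed 'DFU' marker, suffix
# length and the trailing CRC32.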
| gpl-2.0 |
richardmg/qtcreator | share/qtcreator/debugger/stdtypes.py | 1 | 22697 | ############################################################################
#
# Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
# Contact: http://www.qt-project.org/legal
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and Digia. For licensing terms and
# conditions see http://qt.digia.com/licensing. For further information
# use the contact form at http://qt.digia.com/contact-us.
#
# GNU Lesser General Public License Usage
# Alternatively, this file may be used under the terms of the GNU Lesser
# General Public License version 2.1 as published by the Free Software
# Foundation and appearing in the file LICENSE.LGPL included in the
# packaging of this file. Please review the following information to
# ensure the GNU Lesser General Public License version 2.1 requirements
# will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
#
# In addition, as a special exception, Digia gives you certain additional
# rights. These rights are described in the Digia Qt LGPL Exception
# version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
#
#############################################################################
from dumper import *
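# Orientation note (added comment, not part of the original file): the dumper
# framework appears to dispatch on type names with "::" replaced by "__", so
# e.g. qdump__std__vector handles std::vector and qdump__std____1__vector
# handles std::__1::vector (libc++); qform__* functions advertise the display
# formats offered for a type.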
def qdump____c_style_array__(d, value):
type = value.type.unqualified()
targetType = value[0].type
#d.putAddress(value.address)
d.putType(type)
d.putNumChild(1)
format = d.currentItemFormat()
isDefault = format == None and str(targetType.unqualified()) == "char"
if isDefault or format == 0 or format == 1 or format == 2:
blob = d.readMemory(d.addressOf(value), type.sizeof)
if isDefault:
# Use Latin1 as default for char [].
d.putValue(blob, Hex2EncodedLatin1)
elif format == 0:
# Explicitly requested Latin1 formatting.
d.putValue(blob, Hex2EncodedLatin1)
elif format == 1:
# Explicitly requested UTF-8 formatting.
d.putValue(blob, Hex2EncodedUtf8)
elif format == 2:
# Explicitly requested Local 8-bit formatting.
d.putValue(blob, Hex2EncodedLocal8Bit)
else:
d.putValue("@0x%x" % d.pointerValue(value.cast(targetType.pointer())))
if d.currentIName in d.expandedINames:
p = d.addressOf(value)
ts = targetType.sizeof
if not d.tryPutArrayContents(targetType, p, int(type.sizeof / ts)):
with Children(d, childType=targetType,
addrBase=p, addrStep=ts):
d.putFields(value)
def qdump__std__array(d, value):
size = d.numericTemplateArgument(value.type, 1)
d.putItemCount(size)
d.putNumChild(size)
if d.isExpanded():
innerType = d.templateArgument(value.type, 0)
d.putArrayData(innerType, d.addressOf(value), size)
def qdump__std____1__array(d, value):
qdump__std__array(d, value)
def qdump__std__complex(d, value):
innerType = d.templateArgument(value.type, 0)
real = value.cast(innerType)
imag = d.createValue(d.addressOf(value) + innerType.sizeof, innerType)
d.putValue("(%f, %f)" % (real, imag));
d.putNumChild(2)
if d.isExpanded():
with Children(d, 2, childType=innerType):
d.putSubItem("real", real)
d.putSubItem("imag", imag)
def qdump__std__deque(d, value):
innerType = d.templateArgument(value.type, 0)
innerSize = innerType.sizeof
bufsize = 1
if innerSize < 512:
bufsize = int(512 / innerSize)
impl = value["_M_impl"]
start = impl["_M_start"]
finish = impl["_M_finish"]
size = bufsize * toInteger(finish["_M_node"] - start["_M_node"] - 1)
size += toInteger(finish["_M_cur"] - finish["_M_first"])
size += toInteger(start["_M_last"] - start["_M_cur"])
d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.putItemCount(size)
d.putNumChild(size)
if d.isExpanded():
with Children(d, size, maxNumChild=2000, childType=innerType):
pcur = start["_M_cur"]
pfirst = start["_M_first"]
plast = start["_M_last"]
pnode = start["_M_node"]
for i in d.childRange():
d.putSubItem(i, pcur.dereference())
pcur += 1
if toInteger(pcur) == toInteger(plast):
newnode = pnode + 1
pnode = newnode
pfirst = newnode.dereference()
plast = pfirst + bufsize
pcur = pfirst
def qdump__std____debug__deque(d, value):
qdump__std__deque(d, value)
def qdump__std__list(d, value):
head = d.dereferenceValue(value)
impl = value["_M_impl"]
node = impl["_M_node"]
size = 0
pp = d.dereference(head)
while head != pp and size <= 1001:
size += 1
pp = d.dereference(pp)
d.putItemCount(size, 1000)
d.putNumChild(size)
if d.isExpanded():
p = node["_M_next"]
innerType = d.templateArgument(value.type, 0)
with Children(d, size, maxNumChild=1000, childType=innerType):
for i in d.childRange():
innerPointer = innerType.pointer()
d.putSubItem(i, (p + 1).cast(innerPointer).dereference())
p = p["_M_next"]
def qdump__std____debug__list(d, value):
qdump__std__list(d, value)
def qform__std__map():
return mapForms()
def qdump__std__map(d, value):
impl = value["_M_t"]["_M_impl"]
size = int(impl["_M_node_count"])
d.check(0 <= size and size <= 100*1000*1000)
d.putItemCount(size)
d.putNumChild(size)
if d.isExpanded():
keyType = d.templateArgument(value.type, 0)
valueType = d.templateArgument(value.type, 1)
try:
# Does not work on gcc 4.4, the allocator type (fourth template
# argument) seems not to be available.
pairType = d.templateArgument(d.templateArgument(value.type, 3), 0)
pairPointer = pairType.pointer()
except:
# So use this as workaround:
pairType = d.templateArgument(impl.type, 1)
pairPointer = pairType.pointer()
isCompact = d.isMapCompact(keyType, valueType)
innerType = pairType
if isCompact:
innerType = valueType
node = impl["_M_header"]["_M_left"]
childType = innerType
if size == 0:
childType = pairType
childNumChild = 2
if isCompact:
childNumChild = None
with Children(d, size, maxNumChild=1000,
childType=childType, childNumChild=childNumChild):
for i in d.childRange():
with SubItem(d, i):
pair = (node + 1).cast(pairPointer).dereference()
if isCompact:
d.putMapName(pair["first"])
d.putItem(pair["second"])
else:
d.putEmptyValue()
if d.isExpanded():
with Children(d, 2):
d.putSubItem("first", pair["first"])
d.putSubItem("second", pair["second"])
if d.isNull(node["_M_right"]):
parent = node["_M_parent"]
while node == parent["_M_right"]:
node = parent
parent = parent["_M_parent"]
if node["_M_right"] != parent:
node = parent
else:
node = node["_M_right"]
while not d.isNull(node["_M_left"]):
node = node["_M_left"]
def qdump__std____debug__map(d, value):
qdump__std__map(d, value)
def qdump__std____debug__set(d, value):
qdump__std__set(d, value)
def qdump__std____cxx1998__map(d, value):
qdump__std__map(d, value)
def stdTreeIteratorHelper(d, value):
node = value["_M_node"].dereference()
d.putNumChild(1)
d.putEmptyValue()
if d.isExpanded():
nodeTypeName = str(value.type).replace("_Rb_tree_iterator", "_Rb_tree_node", 1)
nodeTypeName = nodeTypeName.replace("_Rb_tree_const_iterator", "_Rb_tree_node", 1)
nodeType = d.lookupType(nodeTypeName)
data = node.cast(nodeType)["_M_value_field"]
with Children(d):
try:
d.putSubItem("first", data["first"])
d.putSubItem("second", data["second"])
except:
d.putSubItem("value", data)
with SubItem(d, "node"):
d.putNumChild(1)
d.putEmptyValue()
d.putType(" ")
if d.isExpanded():
with Children(d):
d.putSubItem("color", node["_M_color"])
d.putSubItem("left", node["_M_left"])
d.putSubItem("right", node["_M_right"])
d.putSubItem("parent", node["_M_parent"])
def qdump__std___Rb_tree_iterator(d, value):
stdTreeIteratorHelper(d, value)
def qdump__std___Rb_tree_const_iterator(d, value):
stdTreeIteratorHelper(d, value)
def qdump__std__map__iterator(d, value):
stdTreeIteratorHelper(d, value)
def qdump____gnu_debug___Safe_iterator(d, value):
d.putItem(value["_M_current"])
def qdump__std__map__const_iterator(d, value):
stdTreeIteratorHelper(d, value)
def qdump__std__set__iterator(d, value):
stdTreeIteratorHelper(d, value)
def qdump__std__set__const_iterator(d, value):
stdTreeIteratorHelper(d, value)
def qdump__std____cxx1998__set(d, value):
qdump__std__set(d, value)
def qdump__std__set(d, value):
impl = value["_M_t"]["_M_impl"]
size = int(impl["_M_node_count"])
d.check(0 <= size and size <= 100*1000*1000)
d.putItemCount(size)
d.putNumChild(size)
if d.isExpanded():
valueType = d.templateArgument(value.type, 0)
node = impl["_M_header"]["_M_left"]
with Children(d, size, maxNumChild=1000, childType=valueType):
for i in d.childRange():
d.putSubItem(i, (node + 1).cast(valueType.pointer()).dereference())
if d.isNull(node["_M_right"]):
parent = node["_M_parent"]
while node == parent["_M_right"]:
node = parent
parent = parent["_M_parent"]
if node["_M_right"] != parent:
node = parent
else:
node = node["_M_right"]
while not d.isNull(node["_M_left"]):
node = node["_M_left"]
def qdump__std__stack(d, value):
qdump__std__deque(d, value["c"])
def qdump__std____debug__stack(d, value):
qdump__std__stack(d, value)
def qform__std__string():
return "Inline,In Separate Window"
def qdump__std__string(d, value):
qdump__std__stringHelper1(d, value, 1)
def qdump__std__stringHelper1(d, value, charSize):
data = value["_M_dataplus"]["_M_p"]
# We can't lookup the std::string::_Rep type without crashing LLDB,
# so hard-code assumption on member position
# struct { size_type _M_length, size_type _M_capacity, int _M_refcount; }
sizePtr = data.cast(d.sizetType().pointer())
size = int(sizePtr[-3])
alloc = int(sizePtr[-2])
refcount = int(sizePtr[-1])
    d.check(refcount >= -1) # Can be -1 according to docs.
d.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
qdump_stringHelper(d, sizePtr, size * charSize, charSize)
def qdump_stringHelper(d, data, size, charSize):
cutoff = min(size, d.stringCutOff)
mem = d.readMemory(data, cutoff)
if charSize == 1:
encodingType = Hex2EncodedLatin1
displayType = DisplayLatin1String
elif charSize == 2:
encodingType = Hex4EncodedLittleEndian
displayType = DisplayUtf16String
else:
encodingType = Hex8EncodedLittleEndian
displayType = DisplayUtf16String
d.putNumChild(0)
d.putValue(mem, encodingType)
format = d.currentItemFormat()
if format == 1:
d.putDisplay(StopDisplay)
elif format == 2:
d.putField("editformat", displayType)
d.putField("editvalue", d.readMemory(data, size))
def qdump__std____1__string(d, value):
inner = d.childAt(d.childAt(value["__r_"]["__first_"], 0), 0)
size = int(inner["__size_"])
alloc = int(inner["__cap_"])
data = d.pointerValue(inner["__data_"])
qdump_stringHelper(d, data, size, 1)
d.putType("std::string")
def qdump__std____1__wstring(d, value):
inner = d.childAt(d.childAt(value["__r_"]["__first_"], 0), 0)
size = int(inner["__size_"]) * 4
alloc = int(inner["__cap_"])
data = d.pointerValue(inner["__data_"])
qdump_stringHelper(d, data, size, 4)
d.putType("std::wstring")
def qdump__std__shared_ptr(d, value):
i = value["_M_ptr"]
if d.isNull(i):
d.putValue("(null)")
d.putNumChild(0)
return
if d.isSimpleType(d.templateArgument(value.type, 0)):
d.putValue("%s @0x%x" % (i.dereference(), d.pointerValue(i)))
else:
i = expensiveDowncast(i)
d.putValue("@0x%x" % d.pointerValue(i))
d.putNumChild(3)
with Children(d, 3):
d.putSubItem("data", i)
refcount = value["_M_refcount"]["_M_pi"]
d.putIntItem("usecount", refcount["_M_use_count"])
d.putIntItem("weakcount", refcount["_M_weak_count"])
def qdump__std____1__shared_ptr(d, value):
i = value["__ptr_"]
if d.isNull(i):
d.putValue("(null)")
d.putNumChild(0)
return
if d.isSimpleType(d.templateArgument(value.type, 0)):
d.putValue("%s @0x%x" % (i.dereference().value, d.pointerValue(i)))
else:
d.putValue("@0x%x" % d.pointerValue(i))
d.putNumChild(3)
with Children(d, 3):
d.putSubItem("data", i.dereference())
d.putFields(value["__cntrl_"].dereference())
#d.putIntItem("usecount", refcount["_M_use_count"])
#d.putIntItem("weakcount", refcount["_M_weak_count"])
def qdump__std__unique_ptr(d, value):
i = value["_M_t"]["_M_head_impl"]
if d.isNull(i):
d.putValue("(null)")
d.putNumChild(0)
return
if d.isSimpleType(d.templateArgument(value.type, 0)):
d.putValue("%s @0x%x" % (i.dereference(), d.pointerValue(i)))
else:
i = expensiveDowncast(i)
d.putValue("@0x%x" % d.pointerValue(i))
d.putNumChild(1)
with Children(d, 1):
d.putSubItem("data", i)
def qdump__std____1__unique_ptr(d, value):
i = d.childAt(d.childAt(value["__ptr_"], 0), 0)
if d.isNull(i):
d.putValue("(null)")
d.putNumChild(0)
return
if d.isSimpleType(d.templateArgument(value.type, 0)):
d.putValue("%s @0x%x" % (i.dereference().value, d.pointerValue(i)))
else:
d.putValue("@0x%x" % d.pointerValue(i))
d.putNumChild(1)
with Children(d, 1):
d.putSubItem("data", i.dereference())
def qform__std__unordered_map():
return mapForms()
def qform__std____debug__unordered_map():
return mapForms()
def qdump__std__unordered_map(d, value):
keyType = d.templateArgument(value.type, 0)
valueType = d.templateArgument(value.type, 1)
allocatorType = d.templateArgument(value.type, 4)
pairType = d.templateArgument(allocatorType, 0)
ptrSize = d.ptrSize()
try:
# gcc >= 4.7
size = value["_M_element_count"]
start = value["_M_before_begin"]["_M_nxt"]
offset = 0
except:
try:
# libc++ (Mac)
size = value["_M_h"]["_M_element_count"]
start = value["_M_h"]["_M_bbegin"]["_M_node"]["_M_nxt"]
offset = 0
except:
# gcc 4.6.2
size = value["_M_element_count"]
start = value["_M_buckets"].dereference()
# FIXME: Pointer-aligned?
offset = pairType.sizeof
d.putItemCount(size)
# We don't know where the data is
d.putNumChild(0)
return
d.putItemCount(size)
d.putNumChild(size)
if d.isExpanded():
p = d.pointerValue(start)
if d.isMapCompact(keyType, valueType):
with Children(d, size, childType=valueType):
for i in d.childRange():
pair = d.createValue(p + ptrSize, pairType)
with SubItem(d, i):
d.putField("iname", d.currentIName)
d.putName("[%s] %s" % (i, pair["first"]))
d.putValue(pair["second"])
p = d.dereference(p)
else:
with Children(d, size, childType=pairType):
for i in d.childRange():
d.putSubItem(i, d.createValue(p + ptrSize - offset, pairType))
p = d.dereference(p + offset)
def qdump__std____debug__unordered_map(d, value):
qdump__std__unordered_map(d, value)
def qdump__std__unordered_set(d, value):
try:
# gcc >= 4.7
size = value["_M_element_count"]
start = value["_M_before_begin"]["_M_nxt"]
offset = 0
except:
try:
# libc++ (Mac)
size = value["_M_h"]["_M_element_count"]
start = value["_M_h"]["_M_bbegin"]["_M_node"]["_M_nxt"]
offset = 0
except:
# gcc 4.6.2
size = value["_M_element_count"]
start = value["_M_buckets"].dereference()
offset = d.ptrSize()
d.putItemCount(size)
d.putNumChild(size)
if d.isExpanded():
p = d.pointerValue(start)
valueType = d.templateArgument(value.type, 0)
with Children(d, size, childType=valueType):
ptrSize = d.ptrSize()
for i in d.childRange():
d.putSubItem(i, d.createValue(p + ptrSize - offset, valueType))
p = d.dereference(p + offset)
def qform__std____1__unordered_map():
return mapForms()
def qdump__std____1__unordered_map(d, value):
n = toInteger(value["__table_"]["__p2_"]["__first_"])
d.putItemCount(n)
if d.isExpanded():
with Children(d, 1):
d.putFields(value)
def qdump__std____debug__unordered_set(d, value):
qdump__std__unordered_set(d, value)
def qedit__std__vector(expr, value):
values = value.split(',')
n = len(values)
ob = gdb.parse_and_eval(expr)
innerType = d.templateArgument(ob.type, 0)
cmd = "set $d = (%s*)calloc(sizeof(%s)*%s,1)" % (innerType, innerType, n)
gdb.execute(cmd)
cmd = "set {void*[3]}%s = {$d, $d+%s, $d+%s}" % (ob.address, n, n)
gdb.execute(cmd)
cmd = "set (%s[%d])*$d={%s}" % (innerType, n, value)
gdb.execute(cmd)
def qdump__std__vector(d, value):
impl = value["_M_impl"]
type = d.templateArgument(value.type, 0)
alloc = impl["_M_end_of_storage"]
isBool = str(type) == 'bool'
if isBool:
start = impl["_M_start"]["_M_p"]
finish = impl["_M_finish"]["_M_p"]
# FIXME: 8 is CHAR_BIT
storage = d.lookupType("unsigned long")
storagesize = storage.sizeof * 8
size = (finish - start) * storagesize
size += impl["_M_finish"]["_M_offset"]
size -= impl["_M_start"]["_M_offset"]
else:
start = impl["_M_start"]
finish = impl["_M_finish"]
size = finish - start
d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.check(finish <= alloc)
d.checkPointer(start)
d.checkPointer(finish)
d.checkPointer(alloc)
d.putItemCount(size)
d.putNumChild(size)
if d.isExpanded():
if isBool:
with Children(d, size, maxNumChild=10000, childType=type):
for i in d.childRange():
q = start + int(i / storagesize)
d.putBoolItem(str(i), (q.dereference() >> (i % storagesize)) & 1)
else:
d.putArrayData(type, start, size)
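# Illustrative sketch (not part of the original dumper): the element-count
# arithmetic used by the std::vector<bool> branch above, restated on plain
# integers. The 64-bit word size and the sample offsets below are
# assumptions chosen for illustration only.
def _sketch_vector_bool_size(start_word, finish_word, start_offset,
                             finish_offset, word_bits=64):
    # whole storage words between the first and last word in use, plus the
    # bit offset inside the last word, minus the unused bits at the front
    # of the first word -- mirroring the computation in qdump__std__vector
    return (finish_word - start_word) * word_bits + finish_offset - start_offset
# e.g. _sketch_vector_bool_size(0, 2, 0, 5) == 133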
def qdump__std____1__vector(d, value):
innerType = d.templateArgument(value.type, 0)
if d.isLldb and d.childAt(value, 0).type == innerType:
# That's old lldb automatically formatting
begin = d.dereferenceValue(value)
size = value.GetNumChildren()
else:
# Normal case
begin = d.pointerValue(value['__begin_'])
end = d.pointerValue(value['__end_'])
size = (end - begin) / innerType.sizeof
d.putItemCount(size)
d.putNumChild(size)
if d.isExpanded():
d.putArrayData(innerType, begin, size)
def qdump__std____debug__vector(d, value):
qdump__std__vector(d, value)
def qedit__std__string(expr, value):
cmd = "print (%s).assign(\"%s\")" % (expr, value)
gdb.execute(cmd)
def qedit__string(expr, value):
qedit__std__string(expr, value)
def qdump__string(d, value):
qdump__std__string(d, value)
def qdump__std__wstring(d, value):
charSize = d.lookupType('wchar_t').sizeof
qdump__std__stringHelper1(d, value, charSize)
def qdump__std__basic_string(d, value):
innerType = d.templateArgument(value.type, 0)
qdump__std__stringHelper1(d, value, innerType.sizeof)
def qdump__wstring(d, value):
qdump__std__wstring(d, value)
def qdump____gnu_cxx__hash_set(d, value):
ht = value["_M_ht"]
size = int(ht["_M_num_elements"])
d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.putItemCount(size)
d.putNumChild(size)
type = d.templateArgument(value.type, 0)
d.putType("__gnu__cxx::hash_set<%s>" % type)
if d.isExpanded():
with Children(d, size, maxNumChild=1000, childType=type):
buckets = ht["_M_buckets"]["_M_impl"]
bucketStart = buckets["_M_start"]
bucketFinish = buckets["_M_finish"]
p = bucketStart
itemCount = 0
for i in xrange(toInteger(bucketFinish - bucketStart)):
if not d.isNull(p.dereference()):
cur = p.dereference()
while not d.isNull(cur):
with SubItem(d, itemCount):
d.putValue(cur["_M_val"])
cur = cur["_M_next"]
itemCount += 1
p = p + 1
def qdump__uint8_t(d, value):
d.putNumChild(0)
d.putValue(int(value))
def qdump__int8_t(d, value):
d.putNumChild(0)
d.putValue(int(value))
| lgpl-2.1 |
dhimmel/networkx | examples/graph/unix_email.py | 62 | 2683 | #!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the recievers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of a MultiDiGraph to hold edge data
of arbitrary Python objects (in this case an email.Message object per message).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, eg
python unix_email.py /var/spool/mail/username
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2005 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
try:
return email.message_from_file(fp)
except email.Errors.MessageParseError:
# Don't return None since that will stop the mailbox iterator
return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
except:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
# parse each messages and build graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
# print edges with message subject
for (u,v,d) in G.edges_iter(data=True):
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
except: # matplotlib not available
pass
| bsd-3-clause |
leliel12/scikit-criteria | skcriteria/divcorr.py | 1 | 3513 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Cabral, Juan; Luczywo, Nadia
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
# DOCS
# =============================================================================
"""Some wrapps around basic divergence and correlation functions to use with
alternative matrix
"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from scipy import stats
# =============================================================================
# CONSTANTS
# =============================================================================
DIVERGENCE_FUNCTIONS = {}
CORRELATION_FUNCTIONS = {}
FUNCTIONS_TYPES = {
"divergence": DIVERGENCE_FUNCTIONS,
"correlation": CORRELATION_FUNCTIONS}
def register_stat(name, ftype):
if ftype not in FUNCTIONS_TYPES:
msg = "'ftype' must be one of {}. Found {}"
raise ValueError(msg.format(FUNCTIONS_TYPES.keys(), ftype))
def _dec(func):
if not hasattr(func, "__call__"):
raise TypeError("'func' must be callable")
fdict = FUNCTIONS_TYPES[ftype]
if name in fdict:
msg = "{} function '{}' already exist"
raise ValueError(msg.format(ftype, name))
fdict[name] = func
return func
return _dec
# =============================================================================
# FUNCTIONS
# =============================================================================
@register_stat("std", "divergence")
def std(arr):
return np.std(arr, axis=0)
@register_stat("var", "divergence")
def var(arr):
return np.var(arr, axis=0)
@register_stat("pearson", "correlation")
def corr_pearson(arr):
return np.corrcoef(arr)
@register_stat("spearman", "correlation")
def corr_spearman(arr):
return stats.spearmanr(arr.T, axis=0).correlation
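# =============================================================================
# USAGE SKETCH
# =============================================================================
# Illustrative only (not part of the scikit-criteria API): how the registry
# above is meant to be used. The "ptp" statistic and the sample matrix are
# assumptions chosen for this sketch.
if __name__ == "__main__":
    @register_stat("ptp", "divergence")
    def ptp(arr):
        # peak-to-peak range of every criterion (column)
        return np.ptp(arr, axis=0)
    mtx = np.array([[1., 2., 3.],
                    [4., 5., 6.]])
    print(DIVERGENCE_FUNCTIONS["ptp"](mtx))   # -> [3. 3. 3.]
    print(DIVERGENCE_FUNCTIONS["std"](mtx))   # -> [1.5 1.5 1.5]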
| bsd-3-clause |
LumaPictures/rez | src/rezplugins/build_system/cmake.py | 1 | 11415 | """
CMake-based build system
"""
from rez.build_system import BuildSystem
from rez.build_process_ import BuildType
from rez.resolved_context import ResolvedContext
from rez.exceptions import BuildSystemError
from rez.util import create_forwarding_script
from rez.packages_ import get_developer_package
from rez.utils.platform_ import platform_
from rez.config import config
from rez.backport.shutilwhich import which
from rez.vendor.schema.schema import Or
from rez.shells import create_shell
import functools
import os.path
import sys
import os
class RezCMakeError(BuildSystemError):
pass
class CMakeBuildSystem(BuildSystem):
"""The CMake build system.
The 'cmake' executable is run within the build environment. Rez supplies a
library of cmake macros in the 'cmake_files' directory; these are added to
cmake's searchpath and are available to use in your own CMakeLists.txt
file.
The following CMake variables are available:
- REZ_BUILD_TYPE: One of 'local', 'central'. Describes whether an install
is going to the local packages path, or the release packages path.
- REZ_BUILD_INSTALL: One of 0 or 1. If 1, an installation is taking place;
if 0, just a build is occurring.
"""
build_systems = {'eclipse': "Eclipse CDT4 - Unix Makefiles",
'codeblocks': "CodeBlocks - Unix Makefiles",
'make': "Unix Makefiles",
'nmake': "NMake Makefiles",
'xcode': "Xcode"}
build_targets = ["Debug", "Release", "RelWithDebInfo"]
schema_dict = {
"build_target": Or(*build_targets),
"build_system": Or(*build_systems.keys()),
"cmake_args": [basestring],
"cmake_binary": Or(None, basestring),
"make_binary": Or(None, basestring)}
@classmethod
def name(cls):
return "cmake"
@classmethod
def child_build_system(cls):
return "make"
@classmethod
def is_valid_root(cls, path):
return os.path.isfile(os.path.join(path, "CMakeLists.txt"))
@classmethod
def bind_cli(cls, parser):
settings = config.plugins.build_system.cmake
parser.add_argument("--bt", "--build-target", dest="build_target",
type=str, choices=cls.build_targets,
default=settings.build_target,
help="set the build target (default: %(default)s).")
parser.add_argument("--bs", "--build-system", dest="build_system",
type=str, choices=cls.build_systems.keys(),
default=settings.build_system,
help="set the cmake build system (default: %(default)s).")
def __init__(self, working_dir, opts=None, package=None, write_build_scripts=False,
verbose=False, build_args=[], child_build_args=[]):
super(CMakeBuildSystem, self).__init__(
working_dir,
opts=opts,
package=package,
write_build_scripts=write_build_scripts,
verbose=verbose,
build_args=build_args,
child_build_args=child_build_args)
self.settings = self.package.config.plugins.build_system.cmake
self.build_target = (opts and opts.build_target) or \
self.settings.build_target
self.cmake_build_system = (opts and opts.build_system) or \
self.settings.build_system
if self.cmake_build_system == 'xcode' and platform_.name != 'osx':
raise RezCMakeError("Generation of Xcode project only available "
"on the OSX platform")
def required_files(self):
return [os.path.join(self.working_dir, "CMakeLists.txt")]
def build(self, context, variant, build_path, install_path, install=False,
build_type=BuildType.local):
def _pr(s):
if self.verbose:
print s
# find cmake binary
if self.settings.cmake_binary:
exe = self.settings.cmake_binary
else:
exe = context.which("cmake", fallback=True)
if not exe:
raise RezCMakeError("could not find cmake binary")
found_exe = which(exe)
if not found_exe:
raise RezCMakeError("cmake binary does not exist: %s" % exe)
sh = create_shell()
# assemble cmake command
cmd = [found_exe, "-d", self.working_dir]
cmd += (self.settings.cmake_args or [])
cmd += (self.build_args or [])
cmd.append("-DCMAKE_INSTALL_PREFIX=%s" % install_path)
cmd.append("-DCMAKE_MODULE_PATH=%s" %
sh.get_key_token("CMAKE_MODULE_PATH").replace('\\', '/'))
cmd.append("-DCMAKE_BUILD_TYPE=%s" % self.build_target)
cmd.append("-DREZ_BUILD_TYPE=%s" % build_type.name)
cmd.append("-DREZ_BUILD_INSTALL=%d" % (1 if install else 0))
cmd.extend(["-G", self.build_systems[self.cmake_build_system]])
if config.rez_1_cmake_variables and \
not config.disable_rez_1_compatibility and \
build_type == BuildType.central:
cmd.append("-DCENTRAL=1")
# execute cmake within the build env
_pr("Executing: %s" % ' '.join(cmd))
        if not os.path.isabs(build_path):
build_path = os.path.join(self.working_dir, build_path)
build_path = os.path.realpath(build_path)
callback = functools.partial(self._add_build_actions,
context=context,
package=self.package,
variant=variant,
build_type=build_type,
install=install,
build_path=build_path,
install_path=install_path)
# run the build command and capture/print stderr at the same time
retcode, _, _ = context.execute_shell(command=cmd,
block=True,
cwd=build_path,
actions_callback=callback)
ret = {}
if retcode:
ret["success"] = False
return ret
if self.write_build_scripts:
# write out the script that places the user in a build env, where
# they can run make directly themselves.
build_env_script = os.path.join(build_path, "build-env")
create_forwarding_script(build_env_script,
module=("build_system", "cmake"),
func_name="_FWD__spawn_build_shell",
working_dir=self.working_dir,
build_path=build_path,
variant_index=variant.index,
install=install,
install_path=install_path)
ret["success"] = True
ret["build_env_script"] = build_env_script
return ret
# assemble make command
if self.settings.make_binary:
cmd = [self.settings.make_binary]
else:
cmd = ["make"]
cmd += (self.child_build_args or [])
# nmake has no -j
if self.settings.make_binary != 'nmake':
if not any(x.startswith("-j") for x in (self.child_build_args or [])):
n = variant.config.build_thread_count
cmd.append("-j%d" % n)
# execute make within the build env
_pr("\nExecuting: %s" % ' '.join(cmd))
retcode, _, _ = context.execute_shell(command=cmd,
block=True,
cwd=build_path,
actions_callback=callback)
if not retcode and install and "install" not in cmd:
cmd.append("install")
# execute make install within the build env
_pr("\nExecuting: %s" % ' '.join(cmd))
retcode, _, _ = context.execute_shell(command=cmd,
block=True,
cwd=build_path,
actions_callback=callback)
ret["success"] = (not retcode)
return ret
@classmethod
def _add_build_actions(cls, executor, context, package, variant,
build_type, install, build_path, install_path=None):
settings = package.config.plugins.build_system.cmake
cmake_path = os.path.join(os.path.dirname(__file__), "cmake_files")
template_path = os.path.join(os.path.dirname(__file__), "template_files")
cls.set_standard_vars(executor=executor,
context=context,
variant=variant,
build_type=build_type,
install=install,
build_path=build_path,
install_path=install_path)
executor.env.CMAKE_MODULE_PATH.append(cmake_path.replace('\\', '/'))
executor.env.REZ_BUILD_DOXYFILE = os.path.join(template_path, 'Doxyfile')
executor.env.REZ_BUILD_INSTALL_PYC = '1' if settings.install_pyc else '0'
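# Illustrative sketch (not part of rez): a standalone restatement of the flag
# assembly done in CMakeBuildSystem.build() above, so the resulting cmake
# command line is easy to see. All paths and the generator choice here are
# made-up assumptions; the real method derives them from the resolved
# context, plugin settings and variant.
def _sketch_cmake_command(source_dir, install_path, build_target="Release",
                          build_type_name="local", install=False,
                          generator="Unix Makefiles"):
    cmd = ["cmake", "-d", source_dir]
    cmd.append("-DCMAKE_INSTALL_PREFIX=%s" % install_path)
    cmd.append("-DCMAKE_BUILD_TYPE=%s" % build_target)
    cmd.append("-DREZ_BUILD_TYPE=%s" % build_type_name)
    cmd.append("-DREZ_BUILD_INSTALL=%d" % (1 if install else 0))
    cmd.extend(["-G", generator])
    return cmd
# e.g. ' '.join(_sketch_cmake_command("/src/mypkg", "/packages/mypkg/1.0.0"))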
def _FWD__spawn_build_shell(working_dir, build_path, variant_index, install,
install_path=None):
# This spawns a shell that the user can run 'make' in directly
context = ResolvedContext.load(os.path.join(build_path, "build.rxt"))
package = get_developer_package(working_dir)
variant = package.get_variant(variant_index)
config.override("prompt", "BUILD>")
callback = functools.partial(CMakeBuildSystem._add_build_actions,
context=context,
package=package,
variant=variant,
build_type=BuildType.local,
install=install,
build_path=build_path,
install_path=install_path)
retcode, _, _ = context.execute_shell(block=True,
cwd=build_path,
actions_callback=callback)
sys.exit(retcode)
def register_plugin():
return CMakeBuildSystem
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 |
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/encodings/iso8859_5.py | 593 | 13271 | """ Python Character Mapping Codec iso8859_5 generated from 'MAPPINGS/ISO8859/8859-5.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-5',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0401' # 0xA1 -> CYRILLIC CAPITAL LETTER IO
u'\u0402' # 0xA2 -> CYRILLIC CAPITAL LETTER DJE
u'\u0403' # 0xA3 -> CYRILLIC CAPITAL LETTER GJE
u'\u0404' # 0xA4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0405' # 0xA5 -> CYRILLIC CAPITAL LETTER DZE
u'\u0406' # 0xA6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0407' # 0xA7 -> CYRILLIC CAPITAL LETTER YI
u'\u0408' # 0xA8 -> CYRILLIC CAPITAL LETTER JE
u'\u0409' # 0xA9 -> CYRILLIC CAPITAL LETTER LJE
u'\u040a' # 0xAA -> CYRILLIC CAPITAL LETTER NJE
u'\u040b' # 0xAB -> CYRILLIC CAPITAL LETTER TSHE
u'\u040c' # 0xAC -> CYRILLIC CAPITAL LETTER KJE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u040e' # 0xAE -> CYRILLIC CAPITAL LETTER SHORT U
u'\u040f' # 0xAF -> CYRILLIC CAPITAL LETTER DZHE
u'\u0410' # 0xB0 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xB1 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0xB2 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0xB3 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0xB4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xB5 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0xB6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0xB7 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0xB8 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xB9 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xBA -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xBB -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xBC -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xBD -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xBE -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xBF -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0xC0 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xC1 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xC2 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xC3 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0xC4 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0xC5 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0xC6 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0xC7 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0xC8 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0xC9 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0xCA -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0xCB -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0xCC -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0xCD -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0xCE -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0xCF -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0xD0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xD1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xD2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xD3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xD4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xD5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xD7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xD8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xD9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xDA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xDB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xDC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xDD -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xDE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xDF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xE0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xE1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xE2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xE3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xE4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xE5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xE6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xE7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xE8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xE9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xEA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xEB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xEC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xED -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xEE -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0xEF -> CYRILLIC SMALL LETTER YA
u'\u2116' # 0xF0 -> NUMERO SIGN
u'\u0451' # 0xF1 -> CYRILLIC SMALL LETTER IO
u'\u0452' # 0xF2 -> CYRILLIC SMALL LETTER DJE
u'\u0453' # 0xF3 -> CYRILLIC SMALL LETTER GJE
u'\u0454' # 0xF4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0455' # 0xF5 -> CYRILLIC SMALL LETTER DZE
u'\u0456' # 0xF6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0xF7 -> CYRILLIC SMALL LETTER YI
u'\u0458' # 0xF8 -> CYRILLIC SMALL LETTER JE
u'\u0459' # 0xF9 -> CYRILLIC SMALL LETTER LJE
u'\u045a' # 0xFA -> CYRILLIC SMALL LETTER NJE
u'\u045b' # 0xFB -> CYRILLIC SMALL LETTER TSHE
u'\u045c' # 0xFC -> CYRILLIC SMALL LETTER KJE
u'\xa7' # 0xFD -> SECTION SIGN
u'\u045e' # 0xFE -> CYRILLIC SMALL LETTER SHORT U
u'\u045f' # 0xFF -> CYRILLIC SMALL LETTER DZHE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
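### Usage sketch (editorial, not part of the generated codec module). The
### sample bytes below are an assumption chosen for illustration; the tables
### above can be exercised directly through the charmap helpers without
### registering the codec.
if __name__ == '__main__':
    # 0xBF -> CYRILLIC CAPITAL LETTER PE, 0xE0 -> CYRILLIC SMALL LETTER ER
    text, length = codecs.charmap_decode(b'\xbf\xe0', 'strict', decoding_table)
    assert (text, length) == (u'\u041f\u0440', 2)
    data, length = codecs.charmap_encode(text, 'strict', encoding_table)
    assert (data, length) == (b'\xbf\xe0', 2)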
| gpl-2.0 |
michaelkuty/horizon-contrib | horizon_contrib/generic/tables.py | 1 | 1617 |
from horizon_contrib import tables
class GenericTable(tables.PaginatedTable):
"""
    Generic table.
    .. attribute:: cls_name String or Django model class
        note: passing the model class directly is preferred, because
        looking the class up by content type makes additional db queries
    .. attribute:: order_by defaults to ("-id")
    note: the table requires Python objects, otherwise ``get_object_id``
    and ``get_object_display`` must be overridden
    (a usage sketch follows at the end of this module)
"""
def get_object_display(self, datum):
return str(datum)
def get_object_id(self, datum):
"""try id as default pk
if not defined ``primary_key=True``
must be defined on one of model fields
"""
if datum:
if not isinstance(datum, dict):
if hasattr(datum._meta, 'pk'):
id = getattr(datum, 'id', None)
elif hasattr(datum._meta, 'pk'):
id = getattr(datum, str(datum._meta.pk.name))
else:
id = datum.get('id')
return id
def __init__(self, *args, **kwargs):
if 'cls_name' in kwargs:
self.model_class = kwargs.pop('cls_name', None)
super(GenericTable, self).__init__(*args, **kwargs)
if 'table' in kwargs:
self.table_type = kwargs.pop('table', None)
self._meta.template = \
"horizon_contrib/tables/_react_data_table.html"
class Meta:
table_actions = tables.TABLE_ACTIONS
row_actions = tables.ROW_ACTIONS
extra_columns = True
ajax_update = True
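# Usage sketch (editorial, not part of horizon_contrib). A typical wiring of
# this table; the ``Project`` model and the table options are illustrative
# assumptions, which is why the example is kept as a comment rather than
# importable code:
#
#     from myapp.models import Project
#
#     class ProjectTable(GenericTable):
#         class Meta(GenericTable.Meta):
#             name = "projects"
#             verbose_name = "Projects"
#
#     table = ProjectTable(request, data=Project.objects.all(),
#                          cls_name=Project)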
| bsd-3-clause |
srluge/SickRage | lib/dogpile/cache/region.py | 47 | 47567 | from __future__ import with_statement
from dogpile.core import Lock, NeedRegenerationException
from dogpile.core.nameregistry import NameRegistry
from . import exception
from .util import function_key_generator, PluginLoader, \
memoized_property, coerce_string_conf, function_multi_key_generator
from .api import NO_VALUE, CachedValue
from .proxy import ProxyBackend
from . import compat
import time
import datetime
from numbers import Number
from functools import wraps
import threading
_backend_loader = PluginLoader("dogpile.cache")
register_backend = _backend_loader.register
from . import backends # noqa
value_version = 1
"""An integer placed in the :class:`.CachedValue`
so that new versions of dogpile.cache can detect cached
values from a previous, backwards-incompatible version.
"""
class CacheRegion(object):
"""A front end to a particular cache backend.
:param name: Optional, a string name for the region.
This isn't used internally
but can be accessed via the ``.name`` parameter, helpful
for configuring a region from a config file.
:param function_key_generator: Optional. A
function that will produce a "cache key" given
a data creation function and arguments, when using
the :meth:`.CacheRegion.cache_on_arguments` method.
The structure of this function
should be two levels: given the data creation function,
return a new function that generates the key based on
the given arguments. Such as::
def my_key_generator(namespace, fn, **kw):
fname = fn.__name__
def generate_key(*arg):
return namespace + "_" + fname + "_".join(str(s) for s in arg)
return generate_key
region = make_region(
function_key_generator = my_key_generator
).configure(
"dogpile.cache.dbm",
expiration_time=300,
arguments={
"filename":"file.dbm"
}
)
The ``namespace`` is that passed to
:meth:`.CacheRegion.cache_on_arguments`. It's not consulted
outside this function, so in fact can be of any form.
For example, it can be passed as a tuple, used to specify
arguments to pluck from \**kw::
def my_key_generator(namespace, fn):
def generate_key(*arg, **kw):
return ":".join(
[kw[k] for k in namespace] +
[str(x) for x in arg]
)
return generate_key
Where the decorator might be used as::
@my_region.cache_on_arguments(namespace=('x', 'y'))
def my_function(a, b, **kw):
return my_data()
:param function_multi_key_generator: Optional.
Similar to ``function_key_generator`` parameter, but it's used in
:meth:`.CacheRegion.cache_multi_on_arguments`. Generated function
should return list of keys. For example::
def my_multi_key_generator(namespace, fn, **kw):
namespace = fn.__name__ + (namespace or '')
def generate_keys(*args):
return [namespace + ':' + str(a) for a in args]
return generate_keys
:param key_mangler: Function which will be used on all incoming
keys before passing to the backend. Defaults to ``None``,
in which case the key mangling function recommended by
the cache backend will be used. A typical mangler
is the SHA1 mangler found at :func:`.sha1_mangle_key`
which coerces keys into a SHA1
hash, so that the string length is fixed. To
disable all key mangling, set to ``False``. Another typical
mangler is the built-in Python function ``str``, which can be used
to convert non-string or Unicode keys to bytestrings, which is
needed when using a backend such as bsddb or dbm under Python 2.x
in conjunction with Unicode keys.
:param async_creation_runner: A callable that, when specified,
will be passed to and called by dogpile.lock when
there is a stale value present in the cache. It will be passed the
    mutex and is responsible for releasing that mutex when finished.
This can be used to defer the computation of expensive creator
functions to later points in the future by way of, for example, a
background thread, a long-running queue, or a task manager system
like Celery.
For a specific example using async_creation_runner, new values can
be created in a background thread like so::
import threading
def async_creation_runner(cache, somekey, creator, mutex):
''' Used by dogpile.core:Lock when appropriate '''
def runner():
try:
value = creator()
cache.set(somekey, value)
finally:
mutex.release()
thread = threading.Thread(target=runner)
thread.start()
region = make_region(
async_creation_runner=async_creation_runner,
).configure(
'dogpile.cache.memcached',
expiration_time=5,
arguments={
'url': '127.0.0.1:11211',
'distributed_lock': True,
}
)
Remember that the first request for a key with no associated
value will always block; async_creator will not be invoked.
However, subsequent requests for cached-but-expired values will
still return promptly. They will be refreshed by whatever
asynchronous means the provided async_creation_runner callable
implements.
By default the async_creation_runner is disabled and is set
to ``None``.
.. versionadded:: 0.4.2 added the async_creation_runner
feature.
"""
def __init__(
self,
name=None,
function_key_generator=function_key_generator,
function_multi_key_generator=function_multi_key_generator,
key_mangler=None,
async_creation_runner=None,
):
"""Construct a new :class:`.CacheRegion`."""
self.name = name
self.function_key_generator = function_key_generator
self.function_multi_key_generator = function_multi_key_generator
if key_mangler:
self.key_mangler = key_mangler
else:
self.key_mangler = None
self._hard_invalidated = None
self._soft_invalidated = None
self.async_creation_runner = async_creation_runner
def configure(
self, backend,
expiration_time=None,
arguments=None,
_config_argument_dict=None,
_config_prefix=None,
wrap=None
):
"""Configure a :class:`.CacheRegion`.
The :class:`.CacheRegion` itself
is returned.
:param backend: Required. This is the name of the
:class:`.CacheBackend` to use, and is resolved by loading
the class from the ``dogpile.cache`` entrypoint.
:param expiration_time: Optional. The expiration time passed
to the dogpile system. May be passed as an integer number
of seconds, or as a ``datetime.timedelta`` value.
        .. versionadded:: 0.5.0
``expiration_time`` may be optionally passed as a
``datetime.timedelta`` value.
The :meth:`.CacheRegion.get_or_create`
method as well as the :meth:`.CacheRegion.cache_on_arguments`
decorator (though note: **not** the :meth:`.CacheRegion.get`
method) will call upon the value creation function after this
time period has passed since the last generation.
:param arguments: Optional. The structure here is passed
directly to the constructor of the :class:`.CacheBackend`
in use, though is typically a dictionary.
:param wrap: Optional. A list of :class:`.ProxyBackend`
classes and/or instances, each of which will be applied
in a chain to ultimately wrap the original backend,
so that custom functionality augmentation can be applied.
.. versionadded:: 0.5.0
.. seealso::
:ref:`changing_backend_behavior`
"""
if "backend" in self.__dict__:
raise exception.RegionAlreadyConfigured(
"This region is already "
"configured with backend: %s"
% self.backend)
backend_cls = _backend_loader.load(backend)
if _config_argument_dict:
self.backend = backend_cls.from_config_dict(
_config_argument_dict,
_config_prefix
)
else:
self.backend = backend_cls(arguments or {})
if not expiration_time or isinstance(expiration_time, Number):
self.expiration_time = expiration_time
elif isinstance(expiration_time, datetime.timedelta):
self.expiration_time = int(
compat.timedelta_total_seconds(expiration_time))
else:
raise exception.ValidationError(
'expiration_time is not a number or timedelta.')
if self.key_mangler is None:
self.key_mangler = self.backend.key_mangler
self._lock_registry = NameRegistry(self._create_mutex)
if getattr(wrap, '__iter__', False):
for wrapper in reversed(wrap):
self.wrap(wrapper)
return self
def wrap(self, proxy):
''' Takes a ProxyBackend instance or class and wraps the
attached backend. '''
# if we were passed a type rather than an instance then
# initialize it.
if type(proxy) == type:
proxy = proxy()
if not issubclass(type(proxy), ProxyBackend):
raise TypeError("Type %s is not a valid ProxyBackend"
% type(proxy))
self.backend = proxy.wrap(self.backend)
def _mutex(self, key):
return self._lock_registry.get(key)
class _LockWrapper(object):
"""weakref-capable wrapper for threading.Lock"""
def __init__(self):
self.lock = threading.Lock()
def acquire(self, wait=True):
return self.lock.acquire(wait)
def release(self):
self.lock.release()
def _create_mutex(self, key):
mutex = self.backend.get_mutex(key)
if mutex is not None:
return mutex
else:
return self._LockWrapper()
def invalidate(self, hard=True):
"""Invalidate this :class:`.CacheRegion`.
Invalidation works by setting a current timestamp
(using ``time.time()``)
representing the "minimum creation time" for
a value. Any retrieved value whose creation
time is prior to this timestamp
is considered to be stale. It does not
affect the data in the cache in any way, and is also
local to this instance of :class:`.CacheRegion`.
Once set, the invalidation time is honored by
the :meth:`.CacheRegion.get_or_create`,
:meth:`.CacheRegion.get_or_create_multi` and
:meth:`.CacheRegion.get` methods.
The method supports both "hard" and "soft" invalidation
options. With "hard" invalidation,
:meth:`.CacheRegion.get_or_create` will force an immediate
regeneration of the value which all getters will wait for.
With "soft" invalidation, subsequent getters will return the
"old" value until the new one is available.
Usage of "soft" invalidation requires that the region or the method
is given a non-None expiration time.
.. versionadded:: 0.3.0
:param hard: if True, cache values will all require immediate
regeneration; dogpile logic won't be used. If False, the
creation time of existing values will be pushed back before
the expiration time so that a return+regen will be invoked.
.. versionadded:: 0.5.1
"""
if hard:
self._hard_invalidated = time.time()
self._soft_invalidated = None
else:
self._hard_invalidated = None
self._soft_invalidated = time.time()
def configure_from_config(self, config_dict, prefix):
"""Configure from a configuration dictionary
and a prefix.
Example::
local_region = make_region()
memcached_region = make_region()
# regions are ready to use for function
# decorators, but not yet for actual caching
# later, when config is available
myconfig = {
"cache.local.backend":"dogpile.cache.dbm",
"cache.local.arguments.filename":"/path/to/dbmfile.dbm",
"cache.memcached.backend":"dogpile.cache.pylibmc",
"cache.memcached.arguments.url":"127.0.0.1, 10.0.0.1",
}
local_region.configure_from_config(myconfig, "cache.local.")
memcached_region.configure_from_config(myconfig,
"cache.memcached.")
"""
config_dict = coerce_string_conf(config_dict)
return self.configure(
config_dict["%sbackend" % prefix],
expiration_time=config_dict.get(
"%sexpiration_time" % prefix, None),
_config_argument_dict=config_dict,
_config_prefix="%sarguments." % prefix,
wrap=config_dict.get(
"%swrap" % prefix, None),
)
@memoized_property
def backend(self):
raise exception.RegionNotConfigured(
"No backend is configured on this region.")
@property
def is_configured(self):
"""Return True if the backend has been configured via the
:meth:`.CacheRegion.configure` method already.
.. versionadded:: 0.5.1
"""
return 'backend' in self.__dict__
def get(self, key, expiration_time=None, ignore_expiration=False):
"""Return a value from the cache, based on the given key.
If the value is not present, the method returns the token
``NO_VALUE``. ``NO_VALUE`` evaluates to False, but is separate from
``None`` to distinguish between a cached value of ``None``.
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
.. versionchanged:: 0.3.0
:meth:`.CacheRegion.get` now checks the value's creation time
against the expiration time, rather than returning
the value unconditionally.
The method also interprets the cached value in terms
of the current "invalidation" time as set by
the :meth:`.invalidate` method. If a value is present,
but its creation time is older than the current
invalidation time, the ``NO_VALUE`` token is returned.
Passing the flag ``ignore_expiration=True`` bypasses
the invalidation time check.
.. versionadded:: 0.3.0
Support for the :meth:`.CacheRegion.invalidate`
method.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
before being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param expiration_time: Optional expiration time value
which will supersede that configured on the :class:`.CacheRegion`
itself.
.. versionadded:: 0.3.0
:param ignore_expiration: if ``True``, the value is returned
from the cache if present, regardless of configured
expiration times or whether or not :meth:`.invalidate`
was called.
.. versionadded:: 0.3.0
"""
if self.key_mangler:
key = self.key_mangler(key)
value = self.backend.get(key)
value = self._unexpired_value_fn(
expiration_time, ignore_expiration)(value)
return value.payload
def _unexpired_value_fn(self, expiration_time, ignore_expiration):
if ignore_expiration:
return lambda value: value
else:
if expiration_time is None:
expiration_time = self.expiration_time
current_time = time.time()
invalidated = self._hard_invalidated or self._soft_invalidated
def value_fn(value):
if value is NO_VALUE:
return value
elif expiration_time is not None and \
current_time - value.metadata["ct"] > expiration_time:
return NO_VALUE
elif invalidated and \
value.metadata["ct"] < invalidated:
return NO_VALUE
else:
return value
return value_fn
def get_multi(self, keys, expiration_time=None, ignore_expiration=False):
"""Return multiple values from the cache, based on the given keys.
Returns values as a list matching the keys given.
E.g.::
values = region.get_multi(["one", "two", "three"])
To convert values to a dictionary, use ``zip()``::
keys = ["one", "two", "three"]
values = region.get_multi(keys)
dictionary = dict(zip(keys, values))
        Keys which aren't present in the cache are returned as
the ``NO_VALUE`` token. ``NO_VALUE`` evaluates to False,
but is separate from
``None`` to distinguish between a cached value of ``None``.
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
.. versionadded:: 0.5.0
"""
if not keys:
return []
if self.key_mangler:
keys = list(map(lambda key: self.key_mangler(key), keys))
backend_values = self.backend.get_multi(keys)
_unexpired_value_fn = self._unexpired_value_fn(
expiration_time, ignore_expiration)
return [
value.payload if value is not NO_VALUE else value
for value in
(
_unexpired_value_fn(value) for value in
backend_values
)
]
def get_or_create(
self, key, creator, expiration_time=None, should_cache_fn=None):
"""Return a cached value based on the given key.
If the value does not exist or is considered to be expired
based on its creation time, the given
creation function may or may not be used to recreate the value
and persist the newly generated value in the cache.
Whether or not the function is used depends on if the
*dogpile lock* can be acquired or not. If it can't, it means
a different thread or process is already running a creation
function for this key against the cache. When the dogpile
lock cannot be acquired, the method will block if no
previous value is available, until the lock is released and
a new value available. If a previous value
is available, that value is returned immediately without blocking.
If the :meth:`.invalidate` method has been called, and
the retrieved value's timestamp is older than the invalidation
timestamp, the value is unconditionally prevented from
being returned. The method will attempt to acquire the dogpile
lock to generate a new value, or will wait
until the lock is released to return the new value.
.. versionchanged:: 0.3.0
The value is unconditionally regenerated if the creation
time is older than the last call to :meth:`.invalidate`.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
before being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param creator: function which creates a new value.
        :param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
:param should_cache_fn: optional callable function which will receive
the value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
E.g.::
def dont_cache_none(value):
return value is not None
value = region.get_or_create("some key",
create_value,
should_cache_fn=dont_cache_none)
Above, the function returns the value of create_value() if
the cache is invalid, however if the return value is None,
it won't be cached.
.. versionadded:: 0.4.3
.. seealso::
:meth:`.CacheRegion.cache_on_arguments` - applies
:meth:`.get_or_create` to any function using a decorator.
:meth:`.CacheRegion.get_or_create_multi` - multiple key/value
version
"""
orig_key = key
if self.key_mangler:
key = self.key_mangler(key)
def get_value():
value = self.backend.get(key)
if value is NO_VALUE or \
value.metadata['v'] != value_version or \
(
self._hard_invalidated and
value.metadata["ct"] < self._hard_invalidated):
raise NeedRegenerationException()
ct = value.metadata["ct"]
if self._soft_invalidated:
if ct < self._soft_invalidated:
ct = time.time() - expiration_time - .0001
return value.payload, ct
def gen_value():
created_value = creator()
value = self._value(created_value)
if not should_cache_fn or \
should_cache_fn(created_value):
self.backend.set(key, value)
return value.payload, value.metadata["ct"]
if expiration_time is None:
expiration_time = self.expiration_time
if expiration_time is None and self._soft_invalidated:
raise exception.DogpileCacheException(
"Non-None expiration time required "
"for soft invalidation")
if expiration_time == -1:
expiration_time = None
if self.async_creation_runner:
def async_creator(mutex):
return self.async_creation_runner(
self, orig_key, creator, mutex)
else:
async_creator = None
with Lock(
self._mutex(key),
gen_value,
get_value,
expiration_time,
async_creator) as value:
return value
def get_or_create_multi(
self, keys, creator, expiration_time=None, should_cache_fn=None):
"""Return a sequence of cached values based on a sequence of keys.
The behavior for generation of values based on keys corresponds
to that of :meth:`.Region.get_or_create`, with the exception that
the ``creator()`` function may be asked to generate any subset of
the given keys. The list of keys to be generated is passed to
``creator()``, and ``creator()`` should return the generated values
as a sequence corresponding to the order of the keys.
The method uses the same approach as :meth:`.Region.get_multi`
and :meth:`.Region.set_multi` to get and set values from the
backend.
:param keys: Sequence of keys to be retrieved.
:param creator: function which accepts a sequence of keys and
returns a sequence of new values.
        :param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
:param should_cache_fn: optional callable function which will receive
each value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
.. versionadded:: 0.5.0
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
"""
def get_value(key):
value = values.get(key, NO_VALUE)
if value is NO_VALUE or \
value.metadata['v'] != value_version or \
(self._hard_invalidated and
value.metadata["ct"] < self._hard_invalidated):
# dogpile.core understands a 0 here as
# "the value is not available", e.g.
# _has_value() will return False.
return value.payload, 0
else:
ct = value.metadata["ct"]
if self._soft_invalidated:
if ct < self._soft_invalidated:
ct = time.time() - expiration_time - .0001
return value.payload, ct
def gen_value():
raise NotImplementedError()
def async_creator(key, mutex):
mutexes[key] = mutex
if expiration_time is None:
expiration_time = self.expiration_time
if expiration_time is None and self._soft_invalidated:
raise exception.DogpileCacheException(
"Non-None expiration time required "
"for soft invalidation")
if expiration_time == -1:
expiration_time = None
mutexes = {}
sorted_unique_keys = sorted(set(keys))
if self.key_mangler:
mangled_keys = [self.key_mangler(k) for k in sorted_unique_keys]
else:
mangled_keys = sorted_unique_keys
orig_to_mangled = dict(zip(sorted_unique_keys, mangled_keys))
values = dict(zip(mangled_keys, self.backend.get_multi(mangled_keys)))
for orig_key, mangled_key in orig_to_mangled.items():
with Lock(
self._mutex(mangled_key),
gen_value,
lambda: get_value(mangled_key),
expiration_time,
async_creator=lambda mutex: async_creator(orig_key, mutex)
):
pass
try:
if mutexes:
# sort the keys, the idea is to prevent deadlocks.
# though haven't been able to simulate one anyway.
keys_to_get = sorted(mutexes)
new_values = creator(*keys_to_get)
values_w_created = dict(
(orig_to_mangled[k], self._value(v))
for k, v in zip(keys_to_get, new_values)
)
if not should_cache_fn:
self.backend.set_multi(values_w_created)
else:
self.backend.set_multi(dict(
(k, v)
for k, v in values_w_created.items()
if should_cache_fn(v[0])
))
values.update(values_w_created)
return [values[orig_to_mangled[k]].payload for k in keys]
finally:
for mutex in mutexes.values():
mutex.release()
def _value(self, value):
"""Return a :class:`.CachedValue` given a value."""
return CachedValue(
value,
{
"ct": time.time(),
"v": value_version
})
def set(self, key, value):
"""Place a new value in the cache under the given key."""
if self.key_mangler:
key = self.key_mangler(key)
self.backend.set(key, self._value(value))
def set_multi(self, mapping):
"""Place new values in the cache under the given keys.
.. versionadded:: 0.5.0
"""
if not mapping:
return
if self.key_mangler:
mapping = dict((
self.key_mangler(k), self._value(v))
for k, v in mapping.items())
else:
mapping = dict((k, self._value(v)) for k, v in mapping.items())
self.backend.set_multi(mapping)
def delete(self, key):
"""Remove a value from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
"""
if self.key_mangler:
key = self.key_mangler(key)
self.backend.delete(key)
def delete_multi(self, keys):
"""Remove multiple values from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
.. versionadded:: 0.5.0
"""
if self.key_mangler:
keys = list(map(lambda key: self.key_mangler(key), keys))
self.backend.delete_multi(keys)
def cache_on_arguments(
self, namespace=None,
expiration_time=None,
should_cache_fn=None,
to_str=compat.string_type,
function_key_generator=None):
"""A function decorator that will cache the return
value of the function using a key derived from the
function itself and its arguments.
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
E.g.::
@someregion.cache_on_arguments()
def generate_something(x, y):
return somedatabase.query(x, y)
The decorated function can then be called normally, where
data will be pulled from the cache region unless a new
value is needed::
result = generate_something(5, 6)
The function is also given an attribute ``invalidate()``, which
provides for invalidation of the value. Pass to ``invalidate()``
the same arguments you'd pass to the function itself to represent
a particular value::
generate_something.invalidate(5, 6)
Another attribute ``set()`` is added to provide extra caching
possibilities relative to the function. This is a convenience
method for :meth:`.CacheRegion.set` which will store a given
value directly without calling the decorated function.
The value to be cached is passed as the first argument, and the
arguments which would normally be passed to the function
should follow::
generate_something.set(3, 5, 6)
The above example is equivalent to calling
``generate_something(5, 6)``, if the function were to produce
the value ``3`` as the value to be cached.
.. versionadded:: 0.4.1 Added ``set()`` method to decorated function.
Similar to ``set()`` is ``refresh()``. This attribute will
invoke the decorated function and populate a new value into
the cache with the new value, as well as returning that value::
newvalue = generate_something.refresh(5, 6)
.. versionadded:: 0.5.0 Added ``refresh()`` method to decorated
function.
Lastly, the ``get()`` method returns either the value cached
for the given key, or the token ``NO_VALUE`` if no such key
exists::
value = generate_something.get(5, 6)
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
The default key generation will use the name
of the function, the module name for the function,
the arguments passed, as well as an optional "namespace"
parameter in order to generate a cache key.
Given a function ``one`` inside the module
``myapp.tools``::
@region.cache_on_arguments(namespace="foo")
def one(a, b):
return a + b
Above, calling ``one(3, 4)`` will produce a
cache key as follows::
myapp.tools:one|foo|3 4
The key generator will ignore an initial argument
of ``self`` or ``cls``, making the decorator suitable
(with caveats) for use with instance or class methods.
Given the example::
class MyClass(object):
@region.cache_on_arguments(namespace="foo")
def one(self, a, b):
return a + b
The cache key above for ``MyClass().one(3, 4)`` will
again produce the same cache key of ``myapp.tools:one|foo|3 4`` -
the name ``self`` is skipped.
The ``namespace`` parameter is optional, and is used
normally to disambiguate two functions of the same
name within the same module, as can occur when decorating
instance or class methods as below::
class MyClass(object):
@region.cache_on_arguments(namespace='MC')
def somemethod(self, x, y):
""
class MyOtherClass(object):
@region.cache_on_arguments(namespace='MOC')
def somemethod(self, x, y):
""
Above, the ``namespace`` parameter disambiguates
between ``somemethod`` on ``MyClass`` and ``MyOtherClass``.
Python class declaration mechanics otherwise prevent
the decorator from having awareness of the ``MyClass``
and ``MyOtherClass`` names, as the function is received
by the decorator before it becomes an instance method.
The function key generation can be entirely replaced
on a per-region basis using the ``function_key_generator``
argument present on :func:`.make_region` and
        :class:`.CacheRegion`. It defaults to
:func:`.function_key_generator`.
:param namespace: optional string argument which will be
established as part of the cache key. This may be needed
to disambiguate functions of the same name within the same
source file, such as those
associated with classes - note that the decorator itself
can't see the parent class on a function as the class is
being declared.
:param expiration_time: if not None, will override the normal
expiration time.
May be specified as a callable, taking no arguments, that
returns a value to be used as the ``expiration_time``. This callable
will be called whenever the decorated function itself is called, in
caching or retrieving. Thus, this can be used to
determine a *dynamic* expiration time for the cached function
result. Example use cases include "cache the result until the
end of the day, week or time period" and "cache until a certain date
or time passes".
.. versionchanged:: 0.5.0
``expiration_time`` may be passed as a callable to
:meth:`.CacheRegion.cache_on_arguments`.
:param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create`.
.. versionadded:: 0.4.3
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
:param function_key_generator: a function that will produce a
"cache key". This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
"""
expiration_time_is_callable = compat.callable(expiration_time)
if function_key_generator is None:
function_key_generator = self.function_key_generator
def decorator(fn):
if to_str is compat.string_type:
# backwards compatible
key_generator = function_key_generator(namespace, fn)
else:
key_generator = function_key_generator(
namespace, fn,
to_str=to_str)
@wraps(fn)
def decorate(*arg, **kw):
key = key_generator(*arg, **kw)
@wraps(fn)
def creator():
return fn(*arg, **kw)
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
return self.get_or_create(key, creator, timeout,
should_cache_fn)
def invalidate(*arg, **kw):
key = key_generator(*arg, **kw)
self.delete(key)
def set_(value, *arg, **kw):
key = key_generator(*arg, **kw)
self.set(key, value)
def get(*arg, **kw):
key = key_generator(*arg, **kw)
return self.get(key)
def refresh(*arg, **kw):
key = key_generator(*arg, **kw)
value = fn(*arg, **kw)
self.set(key, value)
return value
decorate.set = set_
decorate.invalidate = invalidate
decorate.refresh = refresh
decorate.get = get
return decorate
return decorator
def cache_multi_on_arguments(
self, namespace=None, expiration_time=None,
should_cache_fn=None,
asdict=False, to_str=compat.string_type,
function_multi_key_generator=None):
"""A function decorator that will cache multiple return
values from the function using a sequence of keys derived from the
function itself and the arguments passed to it.
This method is the "multiple key" analogue to the
:meth:`.CacheRegion.cache_on_arguments` method.
Example::
@someregion.cache_multi_on_arguments()
def generate_something(*keys):
return [
somedatabase.query(key)
for key in keys
]
The decorated function can be called normally. The decorator
will produce a list of cache keys using a mechanism similar to
that of :meth:`.CacheRegion.cache_on_arguments`, combining the
name of the function with the optional namespace and with the
string form of each key. It will then consult the cache using
the same mechanism as that of :meth:`.CacheRegion.get_multi`
to retrieve all current values; the originally passed keys
corresponding to those values which aren't generated or need
regeneration will be assembled into a new argument list, and
the decorated function is then called with that subset of
arguments.
The returned result is a list::
result = generate_something("key1", "key2", "key3")
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create_multi` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
Unlike the :meth:`.CacheRegion.cache_on_arguments` method,
:meth:`.CacheRegion.cache_multi_on_arguments` works only with
a single function signature, one which takes a simple list of
keys as arguments.
Like :meth:`.CacheRegion.cache_on_arguments`, the decorated function
is also provided with a ``set()`` method, which here accepts a
mapping of keys and values to set in the cache::
generate_something.set({"k1": "value1",
"k2": "value2", "k3": "value3"})
...an ``invalidate()`` method, which has the effect of deleting
the given sequence of keys using the same mechanism as that of
:meth:`.CacheRegion.delete_multi`::
generate_something.invalidate("k1", "k2", "k3")
...a ``refresh()`` method, which will call the creation
function, cache the new values, and return them::
values = generate_something.refresh("k1", "k2", "k3")
...and a ``get()`` method, which will return values
based on the given arguments::
values = generate_something.get("k1", "k2", "k3")
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
Parameters passed to :meth:`.CacheRegion.cache_multi_on_arguments`
have the same meaning as those passed to
:meth:`.CacheRegion.cache_on_arguments`.
:param namespace: optional string argument which will be
established as part of each cache key.
:param expiration_time: if not None, will override the normal
expiration time. May be passed as an integer or a
callable.
:param should_cache_fn: passed to
:meth:`.CacheRegion.get_or_create_multi`. This function is given a
value as returned by the creator, and only if it returns True will
that value be placed in the cache.
:param asdict: if ``True``, the decorated function should return
its result as a dictionary of keys->values, and the final result
of calling the decorated function will also be a dictionary.
If left at its default value of ``False``, the decorated function
should return its result as a list of values, and the final
result of calling the decorated function will also be a list.
When ``asdict==True``, if the dictionary returned by the decorated
function is missing keys, those keys will not be cached.
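As an illustrative sketch (reusing the assumed ``somedatabase`` from the
example above), ``asdict=True`` usage might look like::

    @someregion.cache_multi_on_arguments(asdict=True)
    def generate_something(*keys):
        return dict(
            (key, somedatabase.query(key))
            for key in keys
        )

    values = generate_something("k1", "k2", "k3")
    # -> {"k1": ..., "k2": ..., "k3": ...}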
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
:param function_multi_key_generator: a function that will produce a
list of keys. This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
.. seealso::
:meth:`.CacheRegion.cache_on_arguments`
:meth:`.CacheRegion.get_or_create_multi`
"""
expiration_time_is_callable = compat.callable(expiration_time)
if function_multi_key_generator is None:
function_multi_key_generator = self.function_multi_key_generator
def decorator(fn):
key_generator = function_multi_key_generator(
namespace, fn,
to_str=to_str)
@wraps(fn)
def decorate(*arg, **kw):
cache_keys = arg
keys = key_generator(*arg, **kw)
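# Map each generated cache key back to the original argument that produced
# it, so the creator below can be invoked with only the arguments whose
# cached values are missing or expired.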
key_lookup = dict(zip(keys, cache_keys))
@wraps(fn)
def creator(*keys_to_create):
return fn(*[key_lookup[k] for k in keys_to_create])
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
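# In asdict mode the decorated function returns a dict; adapt it to the
# list-based protocol of get_or_create_multi by emitting NO_VALUE for keys
# the function did not return, and strip those entries from the final
# result.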
if asdict:
def dict_create(*keys):
d_values = creator(*keys)
return [
d_values.get(key_lookup[k], NO_VALUE)
for k in keys]
def wrap_cache_fn(value):
if value is NO_VALUE:
return False
elif not should_cache_fn:
return True
else:
return should_cache_fn(value)
result = self.get_or_create_multi(
keys, dict_create, timeout, wrap_cache_fn)
result = dict(
(k, v) for k, v in zip(cache_keys, result)
if v is not NO_VALUE)
else:
result = self.get_or_create_multi(
keys, creator, timeout,
should_cache_fn)
return result
def invalidate(*arg):
keys = key_generator(*arg)
self.delete_multi(keys)
def set_(mapping):
keys = list(mapping)
gen_keys = key_generator(*keys)
self.set_multi(dict(
(gen_key, mapping[key])
for gen_key, key
in zip(gen_keys, keys))
)
def get(*arg):
keys = key_generator(*arg)
return self.get_multi(keys)
def refresh(*arg):
keys = key_generator(*arg)
values = fn(*arg)
if asdict:
self.set_multi(
dict(zip(keys, [values[a] for a in arg]))
)
return values
else:
self.set_multi(
dict(zip(keys, values))
)
return values
decorate.set = set_
decorate.invalidate = invalidate
decorate.refresh = refresh
decorate.get = get
return decorate
return decorator
def make_region(*arg, **kw):
"""Instantiate a new :class:`.CacheRegion`.
Currently, :func:`.make_region` is a passthrough
to :class:`.CacheRegion`. See that class for
constructor arguments.
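A minimal illustrative sketch (the backend name and settings here are
assumptions for the example, not requirements)::

    from dogpile.cache import make_region

    region = make_region().configure(
        "dogpile.cache.memory",
        expiration_time=3600,
    )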
"""
return CacheRegion(*arg, **kw)
| gpl-3.0 |
eduNEXT/edunext-platform | openedx/core/lib/blockstore_api/tests/test_blockstore_api.py | 4 | 8710 | # -*- coding: utf-8 -*-
"""
Tests for the blockstore_api client library
"""
import unittest
from uuid import UUID
from django.conf import settings
from openedx.core.lib import blockstore_api as api
# A fake UUID that won't represent any real bundle/draft/collection:
BAD_UUID = UUID('12345678-0000-0000-0000-000000000000')
@unittest.skipUnless(settings.RUN_BLOCKSTORE_TESTS, "Requires a running Blockstore server")
class BlockstoreApiClientTest(unittest.TestCase):
"""
Test for the Blockstore API Client.
The goal of these tests is not to test that Blockstore works correctly, but
that the API client can interact with it and all the API client methods
work.
"""
# Collections
def test_nonexistent_collection(self):
""" Request a collection that doesn't exist -> CollectionNotFound """
with self.assertRaises(api.CollectionNotFound):
api.get_collection(BAD_UUID)
def test_collection_crud(self):
""" Create, Fetch, Update, and Delete a Collection """
title = "Fire 🔥 Collection"
# Create:
coll = api.create_collection(title)
self.assertEqual(coll.title, title)
self.assertIsInstance(coll.uuid, UUID)
# Fetch:
coll2 = api.get_collection(coll.uuid)
self.assertEqual(coll, coll2)
# Update:
new_title = "Air 🌀 Collection"
coll3 = api.update_collection(coll.uuid, title=new_title)
self.assertEqual(coll3.title, new_title)
coll4 = api.get_collection(coll.uuid)
self.assertEqual(coll4.title, new_title)
# Delete:
api.delete_collection(coll.uuid)
with self.assertRaises(api.CollectionNotFound):
api.get_collection(coll.uuid)
# Bundles
def test_nonexistent_bundle(self):
""" Request a bundle that doesn't exist -> BundleNotFound """
with self.assertRaises(api.BundleNotFound):
api.get_bundle(BAD_UUID)
def test_bundle_crud(self):
""" Create, Fetch, Update, and Delete a Bundle """
coll = api.create_collection("Test Collection")
args = {
"title": "Water 💧 Bundle",
"slug": "h2o",
"description": "Sploosh",
}
# Create:
bundle = api.create_bundle(coll.uuid, **args)
for attr, value in args.items():
self.assertEqual(getattr(bundle, attr), value)
self.assertIsInstance(bundle.uuid, UUID)
# Fetch:
bundle2 = api.get_bundle(bundle.uuid)
self.assertEqual(bundle, bundle2)
# Update:
new_description = "Water Nation Bending Lessons"
bundle3 = api.update_bundle(bundle.uuid, description=new_description)
self.assertEqual(bundle3.description, new_description)
bundle4 = api.get_bundle(bundle.uuid)
self.assertEqual(bundle4.description, new_description)
# Delete:
api.delete_bundle(bundle.uuid)
with self.assertRaises(api.BundleNotFound):
api.get_bundle(bundle.uuid)
# Drafts, files, and reading/writing file contents:
def test_nonexistent_draft(self):
""" Request a draft that doesn't exist -> DraftNotFound """
with self.assertRaises(api.DraftNotFound):
api.get_draft(BAD_UUID)
def test_drafts_and_files(self):
"""
Test creating, reading, writing, committing, and reverting drafts and
files.
"""
coll = api.create_collection("Test Collection")
bundle = api.create_bundle(coll.uuid, title="Earth 🗿 Bundle", slug="earth", description="another test bundle")
# Create a draft
draft = api.get_or_create_bundle_draft(bundle.uuid, draft_name="test-draft")
self.assertEqual(draft.bundle_uuid, bundle.uuid)
self.assertEqual(draft.name, "test-draft")
self.assertGreaterEqual(draft.updated_at.year, 2019)
# And retrieve it again:
draft2 = api.get_or_create_bundle_draft(bundle.uuid, draft_name="test-draft")
self.assertEqual(draft, draft2)
# Also test retrieving using get_draft
draft3 = api.get_draft(draft.uuid)
self.assertEqual(draft, draft3)
# Write a file into the bundle:
api.write_draft_file(draft.uuid, "test.txt", b"initial version")
# Now the file should be visible in the draft:
draft_contents = api.get_bundle_file_data(bundle.uuid, "test.txt", use_draft=draft.name)
self.assertEqual(draft_contents, b"initial version")
api.commit_draft(draft.uuid)
# Write a new version into the draft:
api.write_draft_file(draft.uuid, "test.txt", b"modified version")
published_contents = api.get_bundle_file_data(bundle.uuid, "test.txt")
self.assertEqual(published_contents, b"initial version")
draft_contents2 = api.get_bundle_file_data(bundle.uuid, "test.txt", use_draft=draft.name)
self.assertEqual(draft_contents2, b"modified version")
# Now delete the draft:
api.delete_draft(draft.uuid)
draft_contents3 = api.get_bundle_file_data(bundle.uuid, "test.txt", use_draft=draft.name)
# Confirm the file is now reset:
self.assertEqual(draft_contents3, b"initial version")
# Finally, test the get_bundle_file* methods:
file_info1 = api.get_bundle_file_metadata(bundle.uuid, "test.txt")
self.assertEqual(file_info1.path, "test.txt")
self.assertEqual(file_info1.size, len(b"initial version"))
self.assertEqual(file_info1.hash_digest, "a45a5c6716276a66c4005534a51453ab16ea63c4")
self.assertEqual(list(api.get_bundle_files(bundle.uuid)), [file_info1])
self.assertEqual(api.get_bundle_files_dict(bundle.uuid), {
"test.txt": file_info1,
})
# Links
def test_links(self):
"""
Test operations involving bundle links.
"""
coll = api.create_collection("Test Collection")
# Create two library bundles and a course bundle:
lib1_bundle = api.create_bundle(coll.uuid, title="Library 1", slug="lib1")
lib1_draft = api.get_or_create_bundle_draft(lib1_bundle.uuid, draft_name="test-draft")
lib2_bundle = api.create_bundle(coll.uuid, title="Library 2", slug="lib2")
lib2_draft = api.get_or_create_bundle_draft(lib2_bundle.uuid, draft_name="other-draft")
course_bundle = api.create_bundle(coll.uuid, title="Course", slug="course")
course_draft = api.get_or_create_bundle_draft(course_bundle.uuid, draft_name="test-draft")
# To create links, we need valid BundleVersions, which requires having committed at least one change:
api.write_draft_file(lib1_draft.uuid, "lib1-data.txt", "hello world")
api.commit_draft(lib1_draft.uuid) # Creates version 1
api.write_draft_file(lib2_draft.uuid, "lib2-data.txt", "hello world")
api.commit_draft(lib2_draft.uuid) # Creates version 1
# Lib2 has no links:
self.assertFalse(api.get_bundle_links(lib2_bundle.uuid))
# Create a link from lib2 to lib1
link1_name = "lib2_to_lib1"
api.set_draft_link(lib2_draft.uuid, link1_name, lib1_bundle.uuid, version=1)
# Now confirm the link exists in the draft:
lib2_draft_links = api.get_bundle_links(lib2_bundle.uuid, use_draft=lib2_draft.name)
self.assertIn(link1_name, lib2_draft_links)
self.assertEqual(lib2_draft_links[link1_name].direct.bundle_uuid, lib1_bundle.uuid)
self.assertEqual(lib2_draft_links[link1_name].direct.version, 1)
# Now commit the change to lib2:
api.commit_draft(lib2_draft.uuid) # Creates version 2
# Now create a link from course to lib2
link2_name = "course_to_lib2"
api.set_draft_link(course_draft.uuid, link2_name, lib2_bundle.uuid, version=2)
api.commit_draft(course_draft.uuid)
# And confirm the link exists in the resulting bundle version:
course_links = api.get_bundle_links(course_bundle.uuid)
self.assertIn(link2_name, course_links)
self.assertEqual(course_links[link2_name].direct.bundle_uuid, lib2_bundle.uuid)
self.assertEqual(course_links[link2_name].direct.version, 2)
# And since the links go course->lib2->lib1, course has an indirect link to lib1:
self.assertEqual(course_links[link2_name].indirect[0].bundle_uuid, lib1_bundle.uuid)
self.assertEqual(course_links[link2_name].indirect[0].version, 1)
# Finally, test deleting a link from course's draft:
api.set_draft_link(course_draft.uuid, link2_name, None, None)
self.assertFalse(api.get_bundle_links(course_bundle.uuid, use_draft=course_draft.name))
| agpl-3.0 |
ReganBell/QReview | build/lib/networkx/algorithms/tree/tests/test_recognition.py | 54 | 4061 |
from nose.tools import *
import networkx as nx
class TestTreeRecognition(object):
graph = nx.Graph
multigraph = nx.MultiGraph
def setUp(self):
self.T1 = self.graph()
self.T2 = self.graph()
self.T2.add_node(1)
self.T3 = self.graph()
self.T3.add_nodes_from(range(5))
edges = [(i,i+1) for i in range(4)]
self.T3.add_edges_from(edges)
self.T5 = self.multigraph()
self.T5.add_nodes_from(range(5))
edges = [(i,i+1) for i in range(4)]
self.T5.add_edges_from(edges)
self.T6 = self.graph()
self.T6.add_nodes_from([6,7])
self.T6.add_edge(6,7)
self.F1 = nx.compose(self.T6, self.T3)
self.N4 = self.graph()
self.N4.add_node(1)
self.N4.add_edge(1,1)
self.N5 = self.graph()
self.N5.add_nodes_from(range(5))
self.N6 = self.graph()
self.N6.add_nodes_from(range(3))
self.N6.add_edges_from([(0,1),(1,2),(2,0)])
self.NF1 = nx.compose(self.T6,self.N6)
@raises(nx.NetworkXPointlessConcept)
def test_null_tree(self):
nx.is_tree(self.graph())
nx.is_tree(self.multigraph())
@raises(nx.NetworkXPointlessConcept)
def test_null_forest(self):
nx.is_forest(self.graph())
nx.is_forest(self.multigraph())
def test_is_tree(self):
assert_true(nx.is_tree(self.T2))
assert_true(nx.is_tree(self.T3))
assert_true(nx.is_tree(self.T5))
def test_is_not_tree(self):
assert_false(nx.is_tree(self.N4))
assert_false(nx.is_tree(self.N5))
assert_false(nx.is_tree(self.N6))
def test_is_forest(self):
assert_true(nx.is_forest(self.T2))
assert_true(nx.is_forest(self.T3))
assert_true(nx.is_forest(self.T5))
assert_true(nx.is_forest(self.F1))
assert_true(nx.is_forest(self.N5))
def test_is_not_forest(self):
assert_false(nx.is_forest(self.N4))
assert_false(nx.is_forest(self.N6))
assert_false(nx.is_forest(self.NF1))
class TestDirectedTreeRecognition(TestTreeRecognition):
graph = nx.DiGraph
multigraph = nx.MultiDiGraph
def test_disconnected_graph():
# https://github.com/networkx/networkx/issues/1144
G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)])
assert_false(nx.is_tree(G))
G = nx.DiGraph()
G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)])
assert_false(nx.is_tree(G))
def test_dag_nontree():
G = nx.DiGraph()
G.add_edges_from([(0,1), (0,2), (1,2)])
assert_false(nx.is_tree(G))
assert_true(nx.is_directed_acyclic_graph(G))
def test_multicycle():
G = nx.MultiDiGraph()
G.add_edges_from([(0,1), (0,1)])
assert_false(nx.is_tree(G))
assert_true(nx.is_directed_acyclic_graph(G))
def test_emptybranch():
G = nx.DiGraph()
G.add_nodes_from(range(10))
assert_true(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_path():
G = nx.DiGraph()
G.add_path(range(5))
assert_true(nx.is_branching(G))
assert_true(nx.is_arborescence(G))
def test_notbranching1():
# Acyclic violation.
G = nx.MultiDiGraph()
G.add_nodes_from(range(10))
G.add_edges_from([(0,1),(1,0)])
assert_false(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_notbranching2():
# In-degree violation.
G = nx.MultiDiGraph()
G.add_nodes_from(range(10))
G.add_edges_from([(0,1),(0,2),(3,2)])
assert_false(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_notarborescence1():
# Not an arborescence due to not spanning.
G = nx.MultiDiGraph()
G.add_nodes_from(range(10))
G.add_edges_from([(0,1),(0,2),(1,3),(5,6)])
assert_true(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_notarborescence2():
# Not an arborescence due to in-degree violation.
G = nx.MultiDiGraph()
G.add_path(range(5))
G.add_edge(6, 4)
assert_false(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
| bsd-3-clause |
sencha/chromium-spacewalk | third_party/markupsafe/_native.py | 1243 | 1187 | # -*- coding: utf-8 -*-
"""
markupsafe._native
~~~~~~~~~~~~~~~~~~
Native Python implementation used when the C module is not compiled.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from markupsafe import Markup
from markupsafe._compat import text_type
def escape(s):
"""Convert the characters &, <, >, ' and " in string s to HTML-safe
sequences. Use this if you need to display text that might contain
such characters in HTML. Marks return value as markup string.
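For example (illustrative; the exact repr differs between Python
versions)::

    >>> escape('<script>alert(1)</script>')
    Markup(u'&lt;script&gt;alert(1)&lt;/script&gt;')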
"""
if hasattr(s, '__html__'):
return s.__html__()
return Markup(text_type(s)
.replace('&', '&amp;')
.replace('>', '&gt;')
.replace('<', '&lt;')
.replace("'", '&#39;')
.replace('"', '&#34;')
)
def escape_silent(s):
"""Like :func:`escape` but converts `None` into an empty
markup string.
"""
if s is None:
return Markup()
return escape(s)
def soft_unicode(s):
"""Make a string unicode if it isn't already. That way a markup
string is not converted back to unicode.
"""
if not isinstance(s, text_type):
s = text_type(s)
return s
| bsd-3-clause |
jarvys/django-1.7-jdb | django/db/migrations/graph.py | 6 | 6570 | from __future__ import unicode_literals
from django.utils.datastructures import OrderedSet
from django.db.migrations.state import ProjectState
class MigrationGraph(object):
"""
Represents the digraph of all migrations in a project.
Each migration is a node, and each dependency is an edge. There are
no implicit dependencies between numbered migrations - the numbering is
merely a convention to aid file listing. Every new numbered migration
has a declared dependency to the previous number, meaning that VCS
branch merges can be detected and resolved.
Migrations files can be marked as replacing another set of migrations -
this is to support the "squash" feature. The graph handler isn't responsible
for these; instead, the code to load them in here should examine the
migration files and if the replaced migrations are all either unapplied
or not present, it should ignore the replaced ones, load in just the
replacing migration, and repoint any dependencies that pointed to the
replaced migrations to point to the replacing one.
A node should be a tuple: (app_path, migration_name). The tree special-cases
things within an app - namely, root nodes and leaf nodes ignore dependencies
to other apps.
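An illustrative sketch of typical usage (the migration objects here are
placeholders)::

    graph = MigrationGraph()
    graph.add_node(("myapp", "0001_initial"), migration_0001)
    graph.add_node(("myapp", "0002_stuff"), migration_0002)
    # 0002 (child) depends on 0001 (parent)
    graph.add_dependency(migration_0002,
                         ("myapp", "0002_stuff"),
                         ("myapp", "0001_initial"))
    graph.forwards_plan(("myapp", "0002_stuff"))
    # -> [("myapp", "0001_initial"), ("myapp", "0002_stuff")]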
"""
def __init__(self):
self.nodes = {}
self.dependencies = {}
self.dependents = {}
def add_node(self, node, implementation):
self.nodes[node] = implementation
def add_dependency(self, migration, child, parent):
if child not in self.nodes:
raise KeyError("Migration %s dependencies reference nonexistent child node %r" % (migration, child))
if parent not in self.nodes:
raise KeyError("Migration %s dependencies reference nonexistent parent node %r" % (migration, parent))
self.dependencies.setdefault(child, set()).add(parent)
self.dependents.setdefault(parent, set()).add(child)
def forwards_plan(self, node):
"""
Given a node, returns a list of which previous nodes (dependencies)
must be applied, ending with the node itself.
This is the list you would follow if applying the migrations to
a database.
"""
if node not in self.nodes:
raise ValueError("Node %r not a valid node" % (node, ))
return self.dfs(node, lambda x: self.dependencies.get(x, set()))
def backwards_plan(self, node):
"""
Given a node, returns a list of which dependent nodes (dependencies)
must be unapplied, ending with the node itself.
This is the list you would follow if removing the migrations from
a database.
"""
if node not in self.nodes:
raise ValueError("Node %r not a valid node" % (node, ))
return self.dfs(node, lambda x: self.dependents.get(x, set()))
def root_nodes(self, app=None):
"""
Returns all root nodes - that is, nodes with no dependencies inside
their app. These are the starting point for an app.
"""
roots = set()
for node in self.nodes:
if not any(key[0] == node[0] for key in self.dependencies.get(node, set())) and (not app or app == node[0]):
roots.add(node)
return sorted(roots)
def leaf_nodes(self, app=None):
"""
Returns all leaf nodes - that is, nodes with no dependents in their app.
These are the "most current" version of an app's schema.
Having more than one per app is technically an error, but one that
gets handled further up, in the interactive command - it's usually the
result of a VCS merge and needs some user input.
"""
leaves = set()
for node in self.nodes:
if not any(key[0] == node[0] for key in self.dependents.get(node, set())) and (not app or app == node[0]):
leaves.add(node)
return sorted(leaves)
def dfs(self, start, get_children):
"""
Dynamic programming based depth first search, for finding dependencies.
"""
cache = {}
def _dfs(start, get_children, path):
# If we already computed this, use that (dynamic programming)
if (start, get_children) in cache:
return cache[(start, get_children)]
# If we've traversed here before, that's a circular dep
if start in path:
raise CircularDependencyError(path[path.index(start):] + [start])
# Build our own results list, starting with us
results = []
results.append(start)
# We need to add to results all the migrations this one depends on
children = sorted(get_children(start))
path.append(start)
for n in children:
results = _dfs(n, get_children, path) + results
path.pop()
# Use OrderedSet to ensure only one instance of each result
results = list(OrderedSet(results))
# Populate DP cache
cache[(start, get_children)] = results
# Done!
return results
return _dfs(start, get_children, [])
def __str__(self):
return "Graph: %s nodes, %s edges" % (len(self.nodes), sum(len(x) for x in self.dependencies.values()))
def make_state(self, nodes=None, at_end=True, real_apps=None):
"""
Given a migration node or nodes, returns a complete ProjectState for it.
If at_end is False, returns the state before the migration has run.
If nodes is not provided, returns the overall most current project state.
"""
if nodes is None:
nodes = list(self.leaf_nodes())
if len(nodes) == 0:
return ProjectState()
if not isinstance(nodes[0], tuple):
nodes = [nodes]
plan = []
for node in nodes:
for migration in self.forwards_plan(node):
if migration not in plan:
if not at_end and migration in nodes:
continue
plan.append(migration)
project_state = ProjectState(real_apps=real_apps)
for node in plan:
project_state = self.nodes[node].mutate_state(project_state)
return project_state
def __contains__(self, node):
return node in self.nodes
class CircularDependencyError(Exception):
"""
Raised when there's an impossible-to-resolve circular dependency.
"""
pass
| bsd-3-clause |
ReachingOut/unisubs | libs/markdown/extensions/abbr.py | 131 | 2899 | '''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
Simple Usage:
>>> import markdown
>>> text = """
... Some text with an ABBR and a REF. Ignore REFERENCE and ref.
...
... *[ABBR]: Abbreviation
... *[REF]: Abbreviation Reference
... """
>>> markdown.markdown(text, ['abbr'])
u'<p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p>'
Copyright 2007-2008
* [Waylan Limberg](http://achinghead.com/)
* [Seemant Kulleen](http://www.kulleen.org/)
'''
import markdown, re
from markdown import etree
# Global Vars
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
class AbbrExtension(markdown.Extension):
""" Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
class AbbrPreprocessor(markdown.preprocessors.Preprocessor):
""" Abbreviation Preprocessor - parse text for abbr references. """
def run(self, lines):
'''
Find and remove all Abbreviation references from the text.
Each reference is set as a new AbbrPattern in the markdown instance.
'''
new_text = []
for line in lines:
m = ABBR_REF_RE.match(line)
if m:
abbr = m.group('abbr').strip()
title = m.group('title').strip()
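# Register an inline pattern for this abbreviation so later occurrences in
# the document body are wrapped in an <abbr> element carrying this title.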
self.markdown.inlinePatterns['abbr-%s'%abbr] = \
AbbrPattern(self._generate_pattern(abbr), title)
else:
new_text.append(line)
return new_text
def _generate_pattern(self, text):
'''
Given a string, returns a regex pattern to match that string.
'HTML' -> r'(?P<abbr>[H][T][M][L])'
Note: we force each char as a literal match (in brackets) as we don't
know what they will be beforehand.
'''
chars = list(text)
for i in range(len(chars)):
chars[i] = r'[%s]' % chars[i]
return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
class AbbrPattern(markdown.inlinepatterns.Pattern):
""" Abbreviation inline pattern. """
def __init__(self, pattern, title):
markdown.inlinepatterns.Pattern.__init__(self, pattern)
self.title = title
def handleMatch(self, m):
abbr = etree.Element('abbr')
abbr.text = m.group('abbr')
abbr.set('title', self.title)
return abbr
def makeExtension(configs=None):
return AbbrExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()
| agpl-3.0 |
jostep/tensorflow | tensorflow/python/estimator/canned/head.py | 4 | 36816 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstractions for the head(s) of a model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export_output
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import nn
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
LossAndLabels = collections.namedtuple('LossAndLabels',
['unweighted_loss', 'processed_labels'])
def _summary_key(head_name, val):
return '%s/%s' % (val, head_name) if head_name else val
class _Head(object):
"""Interface for the head/top of a model.
Given logits (or output of a hidden layer), a Head knows how to compute
predictions, loss, train_op, metrics and export outputs. It is meant to:
1. Simplify writing model_fn and to make model_fn more configurable
2. Support wide range of machine learning models. Since most heads can work
with logits, they can support DNN, RNN, Wide, Wide&Deep,
Global objectives, Gradient boosted trees and many other types
of machine learning models.
Common usage:
Here is simplified model_fn to build a DNN regression model.
```python
def _my_dnn_model_fn(features, labels, mode, params, config=None):
# Optionally your callers can pass head to model_fn as a param.
head = tf.contrib.learn.regression_head(...)
input = tf.contrib.layers.input_from_feature_columns(features, ...)
last_hidden_layer_out = tf.contrib.layers.stack(
input, tf.contrib.layers.fully_connected, [1000, 500])
logits = tf.contrib.layers.fully_connected(
last_hidden_layer_out, head.logits_dimension, activation_fn=None)
def _train_op_fn(loss):
return optimizer.minimize(loss)
return head.create_estimator_spec(
features=features,
labels=labels,
mode=mode,
logits=logits,
train_op_fn=_train_op_fn)
```
There are cases where computing and applying gradients cannot be meaningfully
captured with the train_op_fn we support (for example, with a sync optimizer). In
such cases, you can take responsibility for it yourself. Here is a common
use case:
```python
estimator_spec = head.create_estimator_spec(
features=features,
labels=labels,
mode=mode,
logits=logits,
train_op_fn=tf.contrib.learn.no_op_train_fn)
if mode == model_fn.ModeKeys.TRAIN:
optimizer = ...
sync = tf.train.SyncReplicasOptimizer(opt=optimizer, ...)
update_op = tf.contrib.layers.optimize_loss(optimizer=sync,
loss=estimator_spec.loss, ...)
hooks = [sync.make_session_run_hook(is_chief)]
... update train_op and hooks in EstimatorSpec and return
```
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def logits_dimension(self):
"""Size of the last dimension of the logits `Tensor`.
Typically, logits is of shape `[batch_size, logits_dimension]`.
Returns:
The expected size of the `logits` tensor.
"""
raise NotImplementedError('Calling an abstract method.')
@abc.abstractmethod
def create_loss(self, features, mode, logits, labels):
"""Returns a loss Tensor from provided logits.
This function is designed to be used by framework developers. Almost all
users should use create_estimator_spec(), which calls this internally.
`mode` and `features` are most likely not used, but some Head
implementations may require them.
Args:
features: Input `dict` of `Tensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` to be used for loss construction.
labels: Labels `Tensor`.
Returns:
A LossAndLabels that contains the `Tensor` representing the loss and
possibly processed labels (e.g. vocabulary lookup, shape manipulation,
etc.), to be extendable in the future.
"""
raise NotImplementedError('Calling an abstract method.')
@abc.abstractmethod
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None):
"""Returns `EstimatorSpec` that a model_fn can return.
Please note that,
+ All args must be passed via name.
Args:
features: Input `dict` of `Tensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` to be used by the head.
labels: Labels `Tensor`, or `dict` of same.
train_op_fn: Function that takes a scalar loss `Tensor` and returns an op
to optimize the model with the loss. This is used in TRAIN mode and
must not be None. None is allowed in other modes. If you want to
optimize loss yourself you can pass `no_op_train_fn` and then use
EstimatorSpec.loss to compute and apply gradients.
Returns:
`EstimatorSpec`.
"""
raise NotImplementedError('Calling an abstract method.')
def _maybe_expand_dim(tensor):
"""Expand the dim of `tensor` with static rank 1."""
with ops.name_scope(None, 'maybe_expand_dim', (tensor,)):
tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
if isinstance(tensor, sparse_tensor.SparseTensor):
raise ValueError('SparseTensor labels are not supported.')
static_shape = tensor.shape
if static_shape is None:
return tensor
return (array_ops.expand_dims(tensor, -1) if static_shape.ndims == 1
else tensor)
def _check_labels(labels, expected_labels_dimension):
"""Check labels type and shape."""
with ops.name_scope(None, 'labels', (labels,)) as scope:
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
if isinstance(labels, sparse_tensor.SparseTensor):
raise ValueError('SparseTensor labels are not supported.')
labels_shape = array_ops.shape(labels)
err_msg = 'labels shape must be [batch_size, {}]'.format(
expected_labels_dimension)
assert_rank = check_ops.assert_rank(labels, 2, message=err_msg)
with ops.control_dependencies([assert_rank]):
static_shape = labels.shape
if static_shape is not None:
dim1 = static_shape[1]
if (dim1 is not None) and (dim1 != expected_labels_dimension):
raise ValueError(
'labels shape must be [batch_size, labels_dimension], got %s.' %
(static_shape,))
assert_dimension = check_ops.assert_equal(
expected_labels_dimension, labels_shape[1], message=err_msg)
with ops.control_dependencies([assert_dimension]):
return array_ops.identity(labels, name=scope)
def _check_logits(logits, expected_logits_dimension):
"""Check logits type and shape."""
with ops.name_scope(None, 'logits', (logits,)) as scope:
logits = math_ops.to_float(logits)
logits_shape = array_ops.shape(logits)
assert_rank = check_ops.assert_rank(
logits, 2, data=[logits_shape],
message='logits shape must be [batch_size, logits_dimension]')
with ops.control_dependencies([assert_rank]):
static_shape = logits.shape
if static_shape is not None:
dim1 = static_shape[1]
if (dim1 is not None) and (dim1 != expected_logits_dimension):
raise ValueError(
'logits shape must be [batch_size, logits_dimension], got %s.' %
(static_shape,))
assert_dimension = check_ops.assert_equal(
expected_logits_dimension, logits_shape[1], data=[logits_shape],
message='logits shape must be [batch_size, logits_dimension]')
with ops.control_dependencies([assert_dimension]):
return array_ops.identity(logits, name=scope)
def _indicator_labels_mean(labels, weights=None, name=None):
with ops.name_scope(name, 'labels_mean', (labels, weights)) as scope:
labels = math_ops.to_float(labels, name='labels')
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
return metrics_lib.mean(labels, weights=weights, name=scope)
def _accuracy_baseline(labels_mean):
"""Return accuracy baseline based on labels mean.
This is the best the model could do by always predicting one class.
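For example, if 70% of the weighted labels are positive (labels mean 0.7),
always predicting the positive class is correct 70% of the time, so the
baseline accuracy is max(0.7, 1 - 0.7) = 0.7.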
Args:
labels_mean: Tuple of value and update op.
Returns:
Tuple of value and update op.
"""
with ops.name_scope(None, 'accuracy_baseline', labels_mean):
value, update_op = labels_mean
return (
math_ops.maximum(value, 1. - value, name='value'),
math_ops.maximum(update_op, 1 - update_op, name='update_op'))
def _predictions_mean(predictions, weights=None, name=None):
with ops.name_scope(
name, 'predictions_mean', (predictions, weights)) as scope:
predictions = math_ops.to_float(predictions, name='predictions')
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
return metrics_lib.mean(predictions, weights=weights, name=scope)
def _auc(labels, predictions, weights=None, curve='ROC', name=None):
with ops.name_scope(name, 'auc', (predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions, name='predictions')
if labels.dtype.base_dtype != dtypes.bool:
logging.warning('Casting %s labels to bool.', labels.dtype)
labels = math_ops.cast(labels, dtypes.bool)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
return metrics_lib.auc(
labels=labels, predictions=predictions, weights=weights, curve=curve,
name=scope)
def _accuracy_at_threshold(labels, predictions, weights, threshold, name=None):
with ops.name_scope(
name, 'accuracy_at_%s' % threshold,
(predictions, labels, weights, threshold)) as scope:
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
return metrics_lib.accuracy(
labels=labels, predictions=threshold_predictions, weights=weights,
name=scope)
def _precision_at_threshold(labels, predictions, weights, threshold, name=None):
with ops.name_scope(
name, 'precision_at_%s' % threshold,
(predictions, labels, weights, threshold)) as scope:
precision_tensor, update_op = metrics_lib.precision_at_thresholds(
labels=labels, predictions=predictions, thresholds=(threshold,),
weights=weights, name=scope)
return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)
def _recall_at_threshold(labels, predictions, weights, threshold, name=None):
with ops.name_scope(
name, 'recall_at_%s' % threshold,
(predictions, labels, weights, threshold)) as scope:
precision_tensor, update_op = metrics_lib.recall_at_thresholds(
labels=labels, predictions=predictions, thresholds=(threshold,),
weights=weights, name=scope)
return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)
def _multi_class_head_with_softmax_cross_entropy_loss(n_classes,
weight_column=None,
label_vocabulary=None,
head_name=None):
"""Creates a '_Head' for multi class classification.
This head expects to be fed integer labels specifying the class index.
Args:
n_classes: Number of classes, must be greater than 2 (for 2 classes, use
`_BinaryLogisticHeadWithSigmoidCrossEntropyLoss`).
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_vocabulary: A list of strings represents possible label values. If it
is not given, that means labels are already encoded as integer within
[0, n_classes). If given, labels must be string type and have any value in
`label_vocabulary`. Also there will be errors if vocabulary is not
provided and labels are string.
head_name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + head_name`.
Returns:
An instance of `_Head` for multi class classification.
Raises:
ValueError: if `n_classes`, `metric_class_ids` or `label_keys` is invalid.
"""
if label_vocabulary is not None and not isinstance(label_vocabulary,
(list, tuple)):
raise ValueError('label_vocabulary should be a list. Given type: {}'.format(
type(label_vocabulary)))
return _MultiClassHeadWithSoftmaxCrossEntropyLoss(n_classes, weight_column,
label_vocabulary, head_name)
class _MultiClassHeadWithSoftmaxCrossEntropyLoss(_Head):
"""See `_multi_class_head_with_softmax_cross_entropy_loss`."""
def __init__(self,
n_classes,
weight_column=None,
label_vocabulary=None,
head_name=None):
if (n_classes is None) or (n_classes <= 2):
raise ValueError('n_classes must be > 2: %s.' % n_classes)
self._n_classes = n_classes
self._weight_column = weight_column
self._label_vocabulary = label_vocabulary
self._head_name = head_name
@property
def logits_dimension(self):
return self._n_classes
def _eval_metric_ops(self, labels, probabilities, logits,
class_ids, weights, unweighted_loss):
"""Returns the Eval metric ops."""
with ops.name_scope(
None, 'metrics',
(labels, probabilities, logits, class_ids, weights, unweighted_loss)):
keys = metric_keys.MetricKeys
metric_ops = {
# Estimator already adds a metric for loss.
# TODO(xiejw): Any other metrics?
_summary_key(self._head_name, keys.LOSS_MEAN):
metrics_lib.mean(
unweighted_loss, weights=weights, name=keys.LOSS_MEAN),
_summary_key(self._head_name, keys.ACCURACY):
metrics_lib.accuracy(
labels=labels,
predictions=class_ids,
weights=weights,
name=keys.ACCURACY),
}
return metric_ops
def _label_ids(self, labels):
"""Converts labels to integer id space."""
if self._label_vocabulary is None:
if not labels.dtype.is_integer:
raise ValueError('Labels dtype should be integer. '
'Instead got %s.' % labels.dtype)
label_ids = labels
else:
if labels.dtype != dtypes.string:
raise ValueError('Labels dtype should be string if there is a '
'vocabulary. Instead got {}'.format(labels.dtype))
label_ids = lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self._label_vocabulary),
name='class_id_lookup').lookup(labels)
return _assert_range(label_ids, self._n_classes)
def create_loss(self, features, mode, logits, labels):
"""See `Head`."""
del mode, features # Unused for this head.
label_ids = self._label_ids(_check_labels(_maybe_expand_dim(labels), 1))
unweighted_loss = losses.sparse_softmax_cross_entropy(
labels=label_ids, logits=logits, reduction=losses.Reduction.NONE)
# Restore the squeezed dim, so unweighted_loss matches the weights shape.
return LossAndLabels(
unweighted_loss=array_ops.expand_dims(unweighted_loss, axis=(1,)),
processed_labels=label_ids)
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None):
"""See `Head`."""
with ops.name_scope('head'):
logits = _check_logits(logits, self.logits_dimension)
# Predict.
pred_keys = prediction_keys.PredictionKeys
with ops.name_scope(None, 'predictions', (logits,)):
# class_ids's shape is [batch_size]
class_ids = math_ops.argmax(logits, 1, name=pred_keys.CLASS_IDS)
class_ids = array_ops.expand_dims(class_ids, axis=(1,))
if self._label_vocabulary:
table = lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=self._label_vocabulary,
name='class_string_lookup')
classes = table.lookup(class_ids)
else:
classes = string_ops.as_string(class_ids, name='str_classes')
probabilities = nn.softmax(logits, name=pred_keys.PROBABILITIES)
predictions = {
pred_keys.LOGITS: logits,
pred_keys.PROBABILITIES: probabilities,
# Expand to [batch_size, 1]
pred_keys.CLASS_IDS: class_ids,
pred_keys.CLASSES: classes,
}
if mode == model_fn.ModeKeys.PREDICT:
batch_size = array_ops.shape(probabilities)[0]
export_class_list = self._label_vocabulary
if not export_class_list:
export_class_list = string_ops.as_string(
math_ops.range(self._n_classes))
export_output_classes = array_ops.tile(
input=array_ops.expand_dims(input=export_class_list, axis=0),
multiples=[batch_size, 1])
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
'':
export_output.ClassificationOutput(
scores=probabilities,
# `ClassificationOutput` requires string classes.
classes=export_output_classes)
})
# Eval.
unweighted_loss, label_ids = self.create_loss(
features=features, mode=mode, logits=logits, labels=labels)
weights = _weights(features, self._weight_column)
training_loss = losses.compute_weighted_loss(
unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
if mode == model_fn.ModeKeys.EVAL:
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=training_loss,
eval_metric_ops=self._eval_metric_ops(
labels=label_ids,
probabilities=probabilities,
logits=logits,
class_ids=class_ids,
unweighted_loss=unweighted_loss,
weights=weights))
# Train.
if train_op_fn is None:
raise ValueError('train_op_fn can not be None.')
with ops.name_scope(''):
summary.scalar(
_summary_key(self._head_name, metric_keys.MetricKeys.LOSS),
training_loss)
summary.scalar(
_summary_key(self._head_name, metric_keys.MetricKeys.LOSS_MEAN),
losses.compute_weighted_loss(
unweighted_loss, weights=weights,
reduction=losses.Reduction.MEAN))
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.TRAIN,
predictions=predictions,
loss=training_loss,
train_op=train_op_fn(training_loss))
def _binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column=None, thresholds=None, label_vocabulary=None, head_name=None):
"""Creates a `Head` for single label binary classification.
This head uses `sigmoid_cross_entropy_with_logits` loss.
This head expects to be fed float labels of shape `(batch_size, 1)`.
Args:
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
thresholds: Iterable of floats in the range `(0, 1)`. For binary
classification metrics such as precision and recall, an eval metric is
generated for each threshold value. This threshold is applied to the
logistic values to determine the binary classification (i.e., above the
threshold is `true`, below is `false`.
label_vocabulary: A list of strings represents possible label values. If it
is not given, that means labels are already encoded within [0, 1]. If
given, labels must be string type and have any value in
`label_vocabulary`. Also there will be errors if vocabulary is not
provided and labels are string.
head_name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + head_name`.
Returns:
An instance of `Head` for binary classification.
Raises:
ValueError: if `thresholds` contains a value outside of `(0, 1)`.
"""
thresholds = tuple(thresholds) if thresholds else tuple()
if label_vocabulary is not None and not isinstance(label_vocabulary,
(list, tuple)):
raise ValueError('label_vocabulary should be a list. Given type: {}'.format(
type(label_vocabulary)))
for threshold in thresholds:
if (threshold <= 0.0) or (threshold >= 1.0):
raise ValueError('thresholds not in (0, 1): %s.' % (thresholds,))
return _BinaryLogisticHeadWithSigmoidCrossEntropyLoss(
weight_column=weight_column,
thresholds=thresholds,
label_vocabulary=label_vocabulary,
head_name=head_name)
class _BinaryLogisticHeadWithSigmoidCrossEntropyLoss(_Head):
"""See `_binary_logistic_head_with_sigmoid_cross_entropy_loss`."""
def __init__(self,
weight_column=None,
thresholds=None,
label_vocabulary=None,
head_name=None):
self._weight_column = weight_column
self._thresholds = thresholds
self._label_vocabulary = label_vocabulary
self._head_name = head_name
@property
def logits_dimension(self):
return 1
def _eval_metric_ops(self,
labels,
logits,
logistic,
scores,
class_ids,
unweighted_loss,
weights=None):
with ops.name_scope(None, 'metrics', (labels, logits, logistic, scores,
class_ids, unweighted_loss, weights)):
keys = metric_keys.MetricKeys
labels_mean = _indicator_labels_mean(
labels=labels, weights=weights, name=keys.LABEL_MEAN)
metric_ops = {
# Estimator already adds a metric for loss.
_summary_key(self._head_name, keys.LOSS_MEAN):
metrics_lib.mean(
unweighted_loss, weights=weights, name=keys.LOSS_MEAN),
_summary_key(self._head_name, keys.ACCURACY):
metrics_lib.accuracy(
labels=labels,
predictions=class_ids,
weights=weights,
name=keys.ACCURACY),
_summary_key(self._head_name, keys.PREDICTION_MEAN):
_predictions_mean(
predictions=logistic,
weights=weights,
name=keys.PREDICTION_MEAN),
_summary_key(self._head_name, keys.LABEL_MEAN):
labels_mean,
_summary_key(self._head_name, keys.ACCURACY_BASELINE):
_accuracy_baseline(labels_mean),
_summary_key(self._head_name, keys.AUC):
_auc(
labels=labels,
predictions=logistic,
weights=weights,
name=keys.AUC),
_summary_key(self._head_name, keys.AUC_PR):
_auc(
labels=labels,
predictions=logistic,
weights=weights,
curve='PR',
name=keys.AUC_PR)
}
for threshold in self._thresholds:
accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
metric_ops[_summary_key(self._head_name,
accuracy_key)] = _accuracy_at_threshold(
labels=labels,
predictions=logistic,
weights=weights,
threshold=threshold,
name=accuracy_key)
# Precision for positive examples.
precision_key = keys.PRECISION_AT_THRESHOLD % threshold
metric_ops[_summary_key(self._head_name,
precision_key)] = _precision_at_threshold(
labels=labels,
predictions=logistic,
weights=weights,
threshold=threshold,
name=precision_key)
# Recall for positive examples.
recall_key = keys.RECALL_AT_THRESHOLD % threshold
metric_ops[_summary_key(self._head_name,
recall_key)] = _recall_at_threshold(
labels=labels,
predictions=logistic,
weights=weights,
threshold=threshold,
name=recall_key)
return metric_ops
def create_loss(self, features, mode, logits, labels):
"""See `Head`."""
del mode, features # Unused for this head.
labels = _check_labels(_maybe_expand_dim(labels), self.logits_dimension)
if self._label_vocabulary is not None:
labels = lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self._label_vocabulary),
name='class_id_lookup').lookup(labels)
labels = math_ops.to_float(labels)
labels = _assert_range(labels, 2)
return LossAndLabels(
unweighted_loss=nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits),
processed_labels=labels)
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None):
"""See `Head`."""
# Predict.
with ops.name_scope('head'):
with ops.name_scope(None, 'predictions', (logits,)):
pred_keys = prediction_keys.PredictionKeys
logits = _check_logits(logits, self.logits_dimension)
logistic = math_ops.sigmoid(logits, name=pred_keys.LOGISTIC)
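# Pair a zero logit for the negative class with the provided logit, so that
# softmax over the two columns yields (1 - sigmoid(logits), sigmoid(logits)).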
two_class_logits = array_ops.concat(
(array_ops.zeros_like(logits), logits), 1, name='two_class_logits')
scores = nn.softmax(two_class_logits, name=pred_keys.PROBABILITIES)
class_ids = array_ops.reshape(
math_ops.argmax(two_class_logits, axis=1), (-1, 1), name='classes')
if self._label_vocabulary:
table = lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=self._label_vocabulary,
name='class_string_lookup')
classes = table.lookup(class_ids)
else:
classes = string_ops.as_string(class_ids, name='str_classes')
predictions = {
pred_keys.LOGITS: logits,
pred_keys.LOGISTIC: logistic,
pred_keys.PROBABILITIES: scores,
pred_keys.CLASS_IDS: class_ids,
pred_keys.CLASSES: classes,
}
if mode == model_fn.ModeKeys.PREDICT:
batch_size = array_ops.shape(logistic)[0]
export_class_list = self._label_vocabulary
if not export_class_list:
export_class_list = string_ops.as_string([0, 1])
export_output_classes = array_ops.tile(
input=array_ops.expand_dims(input=export_class_list, axis=0),
multiples=[batch_size, 1])
classifier_output = export_output.ClassificationOutput(
scores=scores,
# `ClassificationOutput` requires string classes.
classes=export_output_classes)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
'': classifier_output, # to be the same as other heads.
'classification': classifier_output, # to be called by name.
_DEFAULT_SERVING_KEY: classifier_output, # default
'regression': export_output.RegressionOutput(value=logistic)
})
# Eval.
unweighted_loss, processed_labels = self.create_loss(
features=features, mode=mode, logits=logits, labels=labels)
weights = _weights(features, self._weight_column)
training_loss = losses.compute_weighted_loss(
unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
if mode == model_fn.ModeKeys.EVAL:
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=training_loss,
eval_metric_ops=self._eval_metric_ops(
labels=processed_labels,
logits=logits,
logistic=logistic,
scores=scores,
class_ids=class_ids,
unweighted_loss=unweighted_loss,
weights=weights))
# Train.
if train_op_fn is None:
raise ValueError('train_op_fn can not be None.')
with ops.name_scope(''):
summary.scalar(
_summary_key(self._head_name, metric_keys.MetricKeys.LOSS),
training_loss)
summary.scalar(
_summary_key(self._head_name, metric_keys.MetricKeys.LOSS_MEAN),
losses.compute_weighted_loss(
unweighted_loss, weights=weights,
reduction=losses.Reduction.MEAN))
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.TRAIN,
predictions=predictions,
loss=training_loss,
train_op=train_op_fn(training_loss))
def _regression_head_with_mean_squared_error_loss(weight_column=None,
label_dimension=1,
head_name=None):
"""Creates a `_Head` for regression using the mean squared loss.
Args:
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
head_name: name of the head. If provided, summary and metrics keys will be
suffixed by `"/" + head_name`.
Returns:
An instance of `_Head` for linear regression.
"""
return _RegressionHeadWithMeanSquaredErrorLoss(
weight_column=weight_column,
label_dimension=label_dimension,
head_name=head_name)
class _RegressionHeadWithMeanSquaredErrorLoss(_Head):
"""`Head` for regression using the mean squared loss."""
def __init__(self, label_dimension, weight_column=None, head_name=None):
"""`Head` for regression."""
if label_dimension < 1:
raise ValueError('Invalid label_dimension %s.' % label_dimension)
self._logits_dimension = label_dimension
self._weight_column = weight_column
self._head_name = head_name
@property
def logits_dimension(self):
return self._logits_dimension
def create_loss(self, features, mode, logits, labels):
"""See `Head`."""
del mode, features # Unused for this head.
labels = _check_labels(
_maybe_expand_dim(math_ops.to_float(labels)), self._logits_dimension)
return LossAndLabels(
unweighted_loss=losses.mean_squared_error(
labels=labels, predictions=logits, reduction=losses.Reduction.NONE),
processed_labels=labels)
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None):
"""See `Head`."""
# Predict.
with ops.name_scope('head'):
logits = _check_logits(logits, self._logits_dimension)
predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}
if mode == model_fn.ModeKeys.PREDICT:
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={'': export_output.RegressionOutput(value=logits)})
# Eval.
unweighted_loss, _ = self.create_loss(
features=features, mode=mode, logits=logits, labels=labels)
weights = _weights(features, self._weight_column)
training_loss = losses.compute_weighted_loss(
unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
if mode == model_fn.ModeKeys.EVAL:
# Estimator already adds a metric for loss.
eval_metric_ops = {
metric_keys.MetricKeys.LOSS_MEAN: metrics_lib.mean(
unweighted_loss, weights=weights)
}
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=training_loss,
eval_metric_ops=eval_metric_ops)
# Train.
if train_op_fn is None:
raise ValueError('train_op_fn can not be None.')
with ops.name_scope(''):
summary.scalar(
_summary_key(self._head_name, metric_keys.MetricKeys.LOSS),
training_loss)
summary.scalar(
_summary_key(self._head_name, metric_keys.MetricKeys.LOSS_MEAN),
losses.compute_weighted_loss(
unweighted_loss, weights=weights,
reduction=losses.Reduction.MEAN))
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.TRAIN,
predictions=predictions,
loss=training_loss,
train_op=train_op_fn(training_loss))
def _assert_range(labels, n_classes):
with ops.name_scope(None, 'assert_range', (labels,)):
assert_less = check_ops.assert_less(
labels,
ops.convert_to_tensor(n_classes, dtype=labels.dtype),
        message='Label IDs must be < n_classes')
    assert_greater = check_ops.assert_non_negative(
        labels, message='Label IDs must be >= 0')
with ops.control_dependencies((assert_less, assert_greater)):
return array_ops.identity(labels)
def _weights(features, weight_column):
"""Fetches weights from features."""
with ops.name_scope(None, 'weights', values=features.values()):
if weight_column is None:
return 1.
if isinstance(weight_column, six.string_types):
weight_column = feature_column_lib.numeric_column(key=weight_column)
if not isinstance(weight_column, feature_column_lib._NumericColumn): # pylint: disable=protected-access
raise TypeError('Weight column must be either a string or _NumericColumn.'
' Given type: {}.'.format(type(weight_column)))
weights = weight_column._get_dense_tensor( # pylint: disable=protected-access
feature_column_lib._LazyBuilder(features)) # pylint: disable=protected-access
if not (weights.dtype.is_floating or weights.dtype.is_integer):
raise ValueError('Weight column should be castable to float. '
'Given dtype: {}'.format(weights.dtype))
weights = _maybe_expand_dim(math_ops.to_float(weights, name='weights'))
return weights
| apache-2.0 |
newfies-dialer/newfies-dialer | newfies/apirest/api_appointment/view_rule.py | 4 | 1095 | # -*- coding: utf-8 -*-
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <[email protected]>
#
from rest_framework import viewsets
from apirest.api_appointment.rule_serializers import RuleSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from appointment.models.rules import Rule
from apirest.permissions import CustomObjectPermissions
class RuleViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint that allows rule to be viewed or edited.
"""
queryset = Rule.objects.all()
serializer_class = RuleSerializer
authentication = (BasicAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, CustomObjectPermissions)
| mpl-2.0 |
roth1002/react-native-intro-qnap | node_modules/node-gyp/gyp/pylib/gyp/generator/android.py | 542 | 45270 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
import subprocess
generator_default_variables = {
'OS': 'android',
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.so',
'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
'LIB_DIR': '$(obj).$(TOOLSET)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(RULE_SOURCES)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
# Boolean to declare that this target does not want its name mangled.
'android_unmangled_name',
# Map of android build system variables to set.
'aosp_build_settings',
]
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
ALL_MODULES_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Map gyp target types to Android module classes.
MODULE_CLASSES = {
'static_library': 'STATIC_LIBRARIES',
'shared_library': 'SHARED_LIBRARIES',
'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx'
def Sourceify(path):
"""Convert a path to its source directory form. The Android backend does not
support options.generator_output, so this function is a noop."""
return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
"""AndroidMkWriter packages up the writing of one target-specific Android.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, android_top_dir):
self.android_top_dir = android_top_dir
def Write(self, qualified_target, relative_target, base_path, output_filename,
spec, configs, part_of_all, write_alias_target, sdk_version):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
relative_target: qualified target name relative to the root
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for
this target
sdk_version: what to emit for LOCAL_SDK_VERSION in output
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.relative_target = relative_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
self.android_module = self.ComputeAndroidModule(spec)
(self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
self.output = self.output_binary = self.ComputeOutput(spec)
# Standard header.
self.WriteLn('include $(CLEAR_VARS)\n')
# Module class and name.
self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
self.WriteLn('LOCAL_MODULE := ' + self.android_module)
# Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
# The library module classes fail if the stem is set. ComputeOutputParts
# makes sure that stem == modulename in these cases.
if self.android_stem != self.android_module:
self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
if self.toolset == 'host':
self.WriteLn('LOCAL_IS_HOST_MODULE := true')
self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)')
else:
self.WriteLn('LOCAL_MODULE_TARGET_ARCH := '
'$(TARGET_$(GYP_VAR_PREFIX)ARCH)')
self.WriteLn('LOCAL_SDK_VERSION := %s' % sdk_version)
# Grab output directories; needed for Actions and Rules.
if self.toolset == 'host':
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))')
else:
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))')
self.WriteLn('gyp_shared_intermediate_dir := '
'$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))')
self.WriteLn()
# List files this target depends on so that actions/rules/copies/sources
# can depend on the list.
# TODO: doesn't pull in things through transitive link deps; needed?
target_dependencies = [x[1] for x in deps if x[0] == 'path']
self.WriteLn('# Make sure our deps are built first.')
self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
local_pathify=True)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs)
# GYP generated outputs.
self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)
# Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
# on both our dependency targets and our generated files.
self.WriteLn('# Make sure our deps and generated files are built first.')
self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
'$(GYP_GENERATED_OUTPUTS)')
self.WriteLn()
# Sources.
if spec.get('sources', []) or extra_sources:
self.WriteSources(spec, configs, extra_sources)
self.WriteTarget(spec, configs, deps, link_deps, part_of_all,
write_alias_target)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = ('path', self.output_binary)
# Update global list of link dependencies.
if self.type == 'static_library':
target_link_deps[qualified_target] = ('static', self.android_module)
elif self.type == 'shared_library':
target_link_deps[qualified_target] = ('shared', self.android_module)
self.fp.close()
return self.android_module
def WriteActions(self, actions, extra_sources, extra_outputs):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
"""
for action in actions:
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Action for target "%s" writes output to local path '
'"%s".' % (self.target, out))
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
# Prepare the actual command.
command = gyp.common.EncodePOSIXShellList(action['action'])
if 'message' in action:
quiet_cmd = 'Gyp action: %s ($@)' % action['message']
else:
quiet_cmd = 'Gyp action: %s ($@)' % name
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the gyp_*
# variables for the action rule with an absolute version so that the
# output goes in the right place.
# Only write the gyp_* rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# Android's envsetup.sh adds a number of directories to the path including
# the built host binary directory. This causes actions/rules invoked by
# gyp to sometimes use these instead of system versions, e.g. bison.
# The built host binaries may not be suitable, and can cause errors.
# So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
# set by envsetup.
self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
% main_output)
# Don't allow spaces in input/output filenames, but make an exception for
# filenames which start with '$(' since it's okay for there to be spaces
# inside of make function/macro invocations.
for input in inputs:
if not input.startswith('$(') and ' ' in input:
raise gyp.common.GypError(
'Action input filename "%s" in target %s contains a space' %
(input, self.target))
for output in outputs:
if not output.startswith('$(') and ' ' in output:
raise gyp.common.GypError(
'Action output filename "%s" in target %s contains a space' %
(output, self.target))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, ' '.join(map(self.LocalPathify, inputs))))
self.WriteLn('\t@echo "%s"' % quiet_cmd)
self.WriteLn('\t$(hide)%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))
extra_outputs += outputs
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
"""
if len(rules) == 0:
return
for rule in rules:
if len(rule.get('rule_sources', [])) == 0:
continue
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
rule['rule_name']))
self.WriteLn('\n### Generated for rule "%s":' % name)
self.WriteLn('# "%s":' % rule)
inputs = rule.get('inputs')
for rule_source in rule.get('rule_sources', []):
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Rule for target %s writes output to local path %s'
% (self.target, out))
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
extra_outputs += outputs
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.extend(outputs)
components = []
for component in rule['action']:
component = self.ExpandInputRoot(component, rule_source_root,
rule_source_dirname)
if '$(RULE_SOURCES)' in component:
component = component.replace('$(RULE_SOURCES)',
rule_source)
components.append(component)
command = gyp.common.EncodePOSIXShellList(components)
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
if dirs:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
# We set up a rule to build the first output, and then set up
# a rule for each additional output to depend on the first.
outputs = map(self.LocalPathify, outputs)
main_output = outputs[0]
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# See explanation in WriteActions.
self.WriteLn('%s: export PATH := '
'$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)
main_output_deps = self.LocalPathify(rule_source)
if inputs:
main_output_deps += ' '
main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, main_output_deps))
self.WriteLn('\t%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (output, main_output))
self.WriteLn()
self.WriteLn()
def WriteCopies(self, copies, extra_outputs):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
"""
self.WriteLn('### Generated for copy rule.')
variable = make.StringToMakefileVariable(self.relative_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# The Android build system does not allow generation of files into the
# source tree. The destination should start with a variable, which will
# typically be $(gyp_intermediate_dir) or
# $(gyp_shared_intermediate_dir). Note that we can't use an assertion
# because some of the gyp tests depend on this.
if not copy['destination'].startswith('$'):
print ('WARNING: Copy rule for target %s writes output to '
'local path %s' % (self.target, copy['destination']))
# LocalPathify() calls normpath, stripping trailing slashes.
path = Sourceify(self.LocalPathify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
filename)))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
(output, path))
self.WriteLn('\t@echo Copying: $@')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
self.WriteLn()
outputs.append(output)
self.WriteLn('%s = %s' % (variable,
' '.join(map(make.QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteSourceFlags(self, spec, configs):
"""Write out the flags and include paths used to compile source files for
the current target.
Args:
spec, configs: input from gyp.
"""
for configname, config in sorted(configs.iteritems()):
extracted_includes = []
self.WriteLn('\n# Flags passed to both C and C++ files.')
cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
config.get('cflags', []) + config.get('cflags_c', []))
extracted_includes.extend(includes_from_cflags)
self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)
self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
prefix='-D', quoter=make.EscapeCppDefine)
self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
includes = list(config.get('include_dirs', []))
includes.extend(extracted_includes)
includes = map(Sourceify, map(self.LocalPathify, includes))
includes = self.NormalizeIncludePaths(includes)
self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)
self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)
self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
'$(MY_DEFS_$(GYP_CONFIGURATION))')
# Undefine ANDROID for host modules
# TODO: the source code should not use macro ANDROID to tell if it's host
# or target module.
if self.toolset == 'host':
self.WriteLn('# Undefine ANDROID for host modules')
self.WriteLn('LOCAL_CFLAGS += -UANDROID')
self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
'$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
# Android uses separate flags for assembly file invocations, but gyp expects
# the same CFLAGS to be applied:
self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)')
def WriteSources(self, spec, configs, extra_sources):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
We need to handle shared_intermediate directory source files as
a special case by copying them to the intermediate directory and
    treating them as generated sources. Otherwise the Android build
rules won't pick them up.
Args:
spec, configs: input from gyp.
extra_sources: Sources generated from Actions or Rules.
"""
sources = filter(make.Compilable, spec.get('sources', []))
generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
extra_sources = filter(make.Compilable, extra_sources)
# Determine and output the C++ extension used by these sources.
# We simply find the first C++ file and use that extension.
all_sources = sources + extra_sources
local_cpp_extension = '.cpp'
for source in all_sources:
(root, ext) = os.path.splitext(source)
if IsCPPExtension(ext):
local_cpp_extension = ext
break
if local_cpp_extension != '.cpp':
self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)
# We need to move any non-generated sources that are coming from the
# shared intermediate directory out of LOCAL_SRC_FILES and put them
# into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
# that don't match our local_cpp_extension, since Android will only
# generate Makefile rules for a single LOCAL_CPP_EXTENSION.
local_files = []
for source in sources:
(root, ext) = os.path.splitext(source)
if '$(gyp_shared_intermediate_dir)' in source:
extra_sources.append(source)
elif '$(gyp_intermediate_dir)' in source:
extra_sources.append(source)
elif IsCPPExtension(ext) and ext != local_cpp_extension:
extra_sources.append(source)
else:
local_files.append(os.path.normpath(os.path.join(self.path, source)))
# For any generated source, if it is coming from the shared intermediate
# directory then we add a Make rule to copy them to the local intermediate
# directory first. This is because the Android LOCAL_GENERATED_SOURCES
# must be in the local module intermediate directory for the compile rules
# to work properly. If the file has the wrong C++ extension, then we add
# a rule to copy that to intermediates and use the new version.
final_generated_sources = []
    # If a source file gets copied, we still need to add the original source
# directory as header search path, for GCC searches headers in the
# directory that contains the source file by default.
origin_src_dirs = []
for source in extra_sources:
local_file = source
if not '$(gyp_intermediate_dir)/' in local_file:
basename = os.path.basename(local_file)
local_file = '$(gyp_intermediate_dir)/' + basename
(root, ext) = os.path.splitext(local_file)
if IsCPPExtension(ext) and ext != local_cpp_extension:
local_file = root + local_cpp_extension
if local_file != source:
self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
self.WriteLn('\tmkdir -p $(@D); cp $< $@')
origin_src_dirs.append(os.path.dirname(source))
final_generated_sources.append(local_file)
# We add back in all of the non-compilable stuff to make sure that the
# make rules have dependencies on them.
final_generated_sources.extend(generated_not_sources)
self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')
origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')
self.WriteList(local_files, 'LOCAL_SRC_FILES')
# Write out the flags used to compile the source; this must be done last
# so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
"""Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names.
"""
if int(spec.get('android_unmangled_name', 0)):
assert self.type != 'shared_library' or self.target.startswith('lib')
return self.target
if self.type == 'shared_library':
# For reasons of convention, the Android build system requires that all
# shared library modules are named 'libfoo' when generating -l flags.
prefix = 'lib_'
else:
prefix = ''
if spec['toolset'] == 'host':
suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'
else:
suffix = '_gyp'
if self.path:
middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target))
else:
middle = make.StringToMakefileVariable(self.target)
return ''.join([prefix, middle, suffix])
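  # Example (illustrative, assuming make.StringToMakefileVariable replaces
  # non-alphanumeric characters with underscores): a target-toolset
  # shared_library named 'foo' defined under path 'base/bar' maps to the
  # module name 'lib_base_bar_foo_gyp'.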
def ComputeOutputParts(self, spec):
"""Return the 'output basename' of a gyp spec, split into filename + ext.
Android libraries must be named the same thing as their module name,
otherwise the linker can't find them, so product_name and so on must be
ignored if we are building a library, and the "lib" prepending is
not done for Android.
"""
assert self.type != 'loadable_module' # TODO: not supported?
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.a'
elif self.type == 'shared_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.so'
elif self.type == 'none':
target_ext = '.stamp'
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
if self.type != 'static_library' and self.type != 'shared_library':
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
target_stem = target_prefix + target
return (target_stem, target_ext)
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
return ''.join(self.ComputeOutputParts(spec))
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
if self.type == 'executable':
# We install host executables into shared_intermediate_dir so they can be
# run by gyp rules that refer to PRODUCT_DIR.
path = '$(gyp_shared_intermediate_dir)'
elif self.type == 'shared_library':
if self.toolset == 'host':
path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)'
else:
path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)'
else:
# Other targets just get built into their intermediate dir.
if self.toolset == 'host':
path = ('$(call intermediates-dir-for,%s,%s,true,,'
'$(GYP_HOST_VAR_PREFIX))' % (self.android_class,
self.android_module))
else:
path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))'
% (self.android_class, self.android_module))
assert spec.get('product_dir') is None # TODO: not supported?
return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeIncludePaths(self, include_paths):
""" Normalize include_paths.
Convert absolute paths to relative to the Android top directory.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
"""
normalized = []
for path in include_paths:
if path[0] == '/':
path = gyp.common.RelativePath(path, self.android_top_dir)
normalized.append(path)
return normalized
def ExtractIncludesFromCFlags(self, cflags):
"""Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
      A tuple of lists: (clean_cflags, include_paths). "-I.." is trimmed.
"""
clean_cflags = []
include_paths = []
for flag in cflags:
if flag.startswith('-I'):
include_paths.append(flag[2:])
else:
clean_cflags.append(flag)
return (clean_cflags, include_paths)
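  # Example (illustrative only):
  #   ExtractIncludesFromCFlags(['-Wall', '-Ifoo/bar', '-O2'])
  # returns (['-Wall', '-O2'], ['foo/bar']).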
def FilterLibraries(self, libraries):
"""Filter the 'libraries' key to separate things that shouldn't be ldflags.
Library entries that look like filenames should be converted to android
module names instead of being passed to the linker as flags.
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules, ldflags)
"""
static_lib_modules = []
dynamic_lib_modules = []
ldflags = []
for libs in libraries:
# Libs can have multiple words.
for lib in libs.split():
# Filter the system libraries, which are added by default by the Android
# build system.
if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
lib.endswith('libgcc.a')):
continue
match = re.search(r'([^/]+)\.a$', lib)
if match:
static_lib_modules.append(match.group(1))
continue
match = re.search(r'([^/]+)\.so$', lib)
if match:
dynamic_lib_modules.append(match.group(1))
continue
if lib.startswith('-l'):
ldflags.append(lib)
return (static_lib_modules, dynamic_lib_modules, ldflags)
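  # Example (illustrative only):
  #   FilterLibraries(['-lm -llog', 'path/to/libfoo.a', 'out/libbar.so'])
  # returns (['libfoo'], ['libbar'], ['-llog']); '-lm' is dropped because the
  # Android build system links such system libraries by default.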
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTargetFlags(self, spec, configs, link_deps):
"""Write Makefile code to specify the link flags and library dependencies.
spec, configs: input from gyp.
link_deps: link dependency list; see ComputeDeps()
"""
# Libraries (i.e. -lfoo)
# These must be included even for static libraries as some of them provide
# implicit include paths through the build system.
libraries = gyp.common.uniquer(spec.get('libraries', []))
static_libs, dynamic_libs, ldflags_libs = self.FilterLibraries(libraries)
if self.type != 'static_library':
for configname, config in sorted(configs.iteritems()):
ldflags = list(config.get('ldflags', []))
self.WriteLn('')
self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
self.WriteList(ldflags_libs, 'LOCAL_GYP_LIBS')
self.WriteLn('LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION)) '
'$(LOCAL_GYP_LIBS)')
# Link dependencies (i.e. other gyp targets this target depends on)
# These need not be included for static libraries as within the gyp build
# we do not use the implicit include path mechanism.
if self.type != 'static_library':
static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
else:
static_link_deps = []
shared_link_deps = []
# Only write the lists if they are non-empty.
if static_libs or static_link_deps:
self.WriteLn('')
self.WriteList(static_libs + static_link_deps,
'LOCAL_STATIC_LIBRARIES')
self.WriteLn('# Enable grouping to fix circular references')
self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
if dynamic_libs or shared_link_deps:
self.WriteLn('')
self.WriteList(dynamic_libs + shared_link_deps,
'LOCAL_SHARED_LIBRARIES')
def WriteTarget(self, spec, configs, deps, link_deps, part_of_all,
write_alias_target):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for this
target
"""
self.WriteLn('### Rules for final target.')
if self.type != 'none':
self.WriteTargetFlags(spec, configs, link_deps)
settings = spec.get('aosp_build_settings', {})
if settings:
self.WriteLn('### Set directly by aosp_build_settings.')
for k, v in settings.iteritems():
if isinstance(v, list):
self.WriteList(v, k)
else:
self.WriteLn('%s := %s' % (k, make.QuoteIfNecessary(v)))
self.WriteLn('')
# Add to the set of targets which represent the gyp 'all' target. We use the
# name 'gyp_all_modules' as the Android build system doesn't allow the use
# of the Make target 'all' and because 'all_modules' is the equivalent of
# the Make target 'all' on Android.
if part_of_all and write_alias_target:
self.WriteLn('# Add target alias to "gyp_all_modules" target.')
self.WriteLn('.PHONY: gyp_all_modules')
self.WriteLn('gyp_all_modules: %s' % self.android_module)
self.WriteLn('')
# Add an alias from the gyp target name to the Android module name. This
# simplifies manual builds of the target, and is required by the test
# framework.
if self.target != self.android_module and write_alias_target:
self.WriteLn('# Alias gyp target name.')
self.WriteLn('.PHONY: %s' % self.target)
self.WriteLn('%s: %s' % (self.target, self.android_module))
self.WriteLn('')
# Add the command to trigger build of the target type depending
# on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
# NOTE: This has to come last!
modifier = ''
if self.toolset == 'host':
modifier = 'HOST_'
if self.type == 'static_library':
self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
elif self.type == 'shared_library':
self.WriteLn('LOCAL_PRELINK_MODULE := false')
self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
elif self.type == 'executable':
# Executables are for build and test purposes only, so they're installed
# to a directory that doesn't get included in the system image.
self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
else:
self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
if self.toolset == 'target':
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)')
else:
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)')
self.WriteLn()
self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
self.WriteLn()
self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) touch $@')
self.WriteLn()
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=')
def WriteList(self, value_list, variable=None, prefix='',
quoter=make.QuoteIfNecessary, local_pathify=False):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
    foo := blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
if local_pathify:
value_list = [self.LocalPathify(l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def LocalPathify(self, path):
"""Convert a subdirectory-relative path into a normalized path which starts
with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
Absolute paths, or paths that contain variables, are just normalized."""
if '$(' in path or os.path.isabs(path):
# path is not a file in the project tree in this case, but calling
# normpath is still important for trimming trailing slashes.
return os.path.normpath(path)
local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
local_path = os.path.normpath(local_path)
# Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
# - i.e. that the resulting path is still inside the project tree. The
# path may legitimately have ended up containing just $(LOCAL_PATH), though,
# so we don't look for a slash.
assert local_path.startswith('$(LOCAL_PATH)'), (
        'Path %s attempts to escape from gyp path %s !' % (path, self.path))
return local_path
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return os.path.normpath(path)
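  # Example (illustrative only):
  #   ExpandInputRoot('$(gyp_intermediate_dir)/%(INPUT_ROOT)s.cc', 'foo', 'a/b')
  # returns '$(gyp_intermediate_dir)/foo.cc'; templates containing neither
  # %(INPUT_ROOT)s nor %(INPUT_DIRNAME)s are returned unchanged.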
def PerformBuild(data, configurations, params):
# The android backend only supports the default configuration.
options = params['options']
makefile = os.path.abspath(os.path.join(options.toplevel_dir,
'GypAndroid.mk'))
env = dict(os.environ)
env['ONE_SHOT_MAKEFILE'] = makefile
arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules']
print 'Building: %s' % arguments
subprocess.check_call(arguments, env=env)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
limit_to_target_all = generator_flags.get('limit_to_target_all', False)
write_alias_targets = generator_flags.get('write_alias_targets', True)
sdk_version = generator_flags.get('aosp_sdk_version', 19)
android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'GypAndroid' + options.suffix + '.mk'
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(header)
# We set LOCAL_PATH just once, here, to the top of the project tree. This
# allows all the other paths we use to be relative to the Android.mk file,
# as the Android build system expects.
root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
android_modules = {}
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
relative_build_file = gyp.common.RelativePath(build_file,
options.toplevel_dir)
build_files.add(relative_build_file)
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
part_of_all = qualified_target in needed_targets
if limit_to_target_all and not part_of_all:
continue
relative_target = gyp.common.QualifiedTarget(relative_build_file, target,
toolset)
writer = AndroidMkWriter(android_top_dir)
android_module = writer.Write(qualified_target, relative_target, base_path,
output_file, spec, configs,
part_of_all=part_of_all,
write_alias_target=write_alias_targets,
sdk_version=sdk_version)
if android_module in android_modules:
print ('ERROR: Android module names must be unique. The following '
'targets both generate Android module name %s.\n %s\n %s' %
(android_module, android_modules[android_module],
qualified_target))
return
android_modules[android_module] = qualified_target
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration)
root_makefile.write('GYP_VAR_PREFIX ?=\n')
root_makefile.write('GYP_HOST_VAR_PREFIX ?=\n')
root_makefile.write('GYP_HOST_MULTILIB ?=\n')
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
root_makefile.write('\n')
if write_alias_targets:
root_makefile.write(ALL_MODULES_FOOTER)
root_makefile.close()
| mit |
gvlproject/tools-iuc | tools/query_tabular/query_db.py | 7 | 1966 | #!/usr/bin/env python
from __future__ import print_function
import re
import sqlite3 as sqlite
import sys
TABLE_QUERY = \
"""
SELECT name, sql
FROM sqlite_master
WHERE type='table'
ORDER BY name
"""
def regex_match(expr, item):
return re.match(expr, item) is not None
def regex_search(expr, item):
return re.search(expr, item) is not None
def regex_sub(expr, replace, item):
return re.sub(expr, replace, item)
def get_connection(sqlitedb_path, addfunctions=True):
conn = sqlite.connect(sqlitedb_path)
if addfunctions:
conn.create_function("re_match", 2, regex_match)
conn.create_function("re_search", 2, regex_search)
conn.create_function("re_sub", 3, regex_sub)
return conn
def describe_tables(conn, outputFile):
try:
c = conn.cursor()
tables_query = TABLE_QUERY
rslt = c.execute(tables_query).fetchall()
for table, sql in rslt:
print("Table %s:" % table, file=outputFile)
try:
col_query = 'SELECT * FROM %s LIMIT 0' % table
cur = conn.cursor().execute(col_query)
cols = [col[0] for col in cur.description]
print(" Columns: %s" % cols, file=outputFile)
except Exception as exc:
print("Warning: %s" % exc, file=sys.stderr)
except Exception as e:
exit('Error: %s' % (e))
exit(0)
def run_query(conn, query, outputFile, no_header=False, comment_char='#'):
cur = conn.cursor()
results = cur.execute(query)
if outputFile is not None:
if not no_header:
outputFile.write("%s%s\n" % (comment_char, '\t'.join(
[str(col[0]) for col in cur.description])))
for i, row in enumerate(results):
outputFile.write("%s\n" % '\t'.join(
[str(val) if val is not None else '' for val in row]))
else:
conn.commit()
return results
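# Illustrative usage sketch (not part of the original module); the database
# path and query are placeholders. The re_match/re_search/re_sub functions
# registered in get_connection() become usable inside SQL expressions:
#
#   conn = get_connection('tables.sqlite')
#   describe_tables(conn, sys.stdout)
#   run_query(conn,
#             "SELECT name FROM sqlite_master WHERE re_search('^t', name)",
#             sys.stdout)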
| mit |
nexiles/odoo | odoo.py | 257 | 5618 | #!/usr/bin/env python
#----------------------------------------------------------
# odoo cli
#
# To install your odoo development environement type:
#
# wget -O- https://raw.githubusercontent.com/odoo/odoo/8.0/odoo.py | python
#
# The setup_* subcommands used to boostrap odoo are defined here inline and may
# only depends on the python 2.7 stdlib
#
# The rest of subcommands are defined in odoo/cli or in <module>/cli by
# subclassing the Command object
#
#----------------------------------------------------------
import os
import re
import sys
import subprocess
GIT_HOOKS_PRE_PUSH = """
#!/usr/bin/env python2
import re
import sys
if re.search('github.com[:/]odoo/odoo.git$', sys.argv[2]):
print "Pushing to /odoo/odoo.git is forbidden, please push to odoo-dev, use --no-verify to override"
sys.exit(1)
"""
def printf(f,*l):
print "odoo:" + f % l
def run(*l):
if isinstance(l[0], list):
l = l[0]
printf("running %s", " ".join(l))
subprocess.check_call(l)
def git_locate():
# Locate git dir
# TODO add support for os.environ.get('GIT_DIR')
# check for an odoo child
if os.path.isfile('odoo/.git/config'):
os.chdir('odoo')
path = os.getcwd()
while path != os.path.abspath(os.sep):
gitconfig_path = os.path.join(path, '.git/config')
if os.path.isfile(gitconfig_path):
release_py = os.path.join(path, 'openerp/release.py')
if os.path.isfile(release_py):
break
path = os.path.dirname(path)
if path == os.path.abspath(os.sep):
path = None
return path
def cmd_setup_git():
git_dir = git_locate()
if git_dir:
printf('git repo found at %s',git_dir)
else:
run("git", "init", "odoo")
os.chdir('odoo')
git_dir = os.getcwd()
if git_dir:
# push sane config for git < 2.0, and hooks
#run('git','config','push.default','simple')
# alias
run('git','config','alias.st','status')
# merge bzr style
run('git','config','merge.commit','no')
        # pull: let me choose between merge or rebase; only works in git > 2.0, use the alias below for git 1.x
run('git','config','pull.ff','only')
run('git','config','alias.pl','pull --ff-only')
pre_push_path = os.path.join(git_dir, '.git/hooks/pre-push')
open(pre_push_path,'w').write(GIT_HOOKS_PRE_PUSH.strip())
os.chmod(pre_push_path, 0755)
# setup odoo remote
run('git','config','remote.odoo.url','https://github.com/odoo/odoo.git')
        run('git','config','remote.odoo.pushurl','git@github.com:odoo/odoo.git')
run('git','config','--add','remote.odoo.fetch','dummy')
run('git','config','--unset-all','remote.odoo.fetch')
run('git','config','--add','remote.odoo.fetch','+refs/heads/*:refs/remotes/odoo/*')
# setup odoo-dev remote
run('git','config','remote.odoo-dev.url','https://github.com/odoo-dev/odoo.git')
        run('git','config','remote.odoo-dev.pushurl','git@github.com:odoo-dev/odoo.git')
run('git','remote','update')
# setup 8.0 branch
run('git','config','branch.8.0.remote','odoo')
run('git','config','branch.8.0.merge','refs/heads/8.0')
run('git','checkout','8.0')
else:
printf('no git repo found')
def cmd_setup_git_dev():
git_dir = git_locate()
if git_dir:
# setup odoo-dev remote
run('git','config','--add','remote.odoo-dev.fetch','dummy')
run('git','config','--unset-all','remote.odoo-dev.fetch')
run('git','config','--add','remote.odoo-dev.fetch','+refs/heads/*:refs/remotes/odoo-dev/*')
run('git','config','--add','remote.odoo-dev.fetch','+refs/pull/*:refs/remotes/odoo-dev/pull/*')
run('git','remote','update')
def cmd_setup_git_review():
git_dir = git_locate()
if git_dir:
# setup odoo-dev remote
run('git','config','--add','remote.odoo.fetch','dummy')
run('git','config','--unset-all','remote.odoo.fetch')
run('git','config','--add','remote.odoo.fetch','+refs/heads/*:refs/remotes/odoo/*')
run('git','config','--add','remote.odoo.fetch','+refs/tags/*:refs/remotes/odoo/tags/*')
run('git','config','--add','remote.odoo.fetch','+refs/pull/*:refs/remotes/odoo/pull/*')
def setup_deps_debian(git_dir):
debian_control_path = os.path.join(git_dir, 'debian/control')
debian_control = open(debian_control_path).read()
debs = re.findall('python-[0-9a-z]+',debian_control)
debs += ["postgresql"]
proc = subprocess.Popen(['sudo','apt-get','install'] + debs, stdin=open('/dev/tty'))
proc.communicate()
def cmd_setup_deps():
git_dir = git_locate()
if git_dir:
if os.path.isfile('/etc/debian_version'):
setup_deps_debian(git_dir)
def setup_pg_debian(git_dir):
cmd = ['sudo','su','-','postgres','-c','createuser -s %s' % os.environ['USER']]
subprocess.call(cmd)
def cmd_setup_pg():
git_dir = git_locate()
if git_dir:
if os.path.isfile('/etc/debian_version'):
setup_pg_debian(git_dir)
def cmd_setup():
cmd_setup_git()
cmd_setup_deps()
cmd_setup_pg()
def main():
    # registry of commands
g = globals()
cmds = dict([(i[4:],g[i]) for i in g if i.startswith('cmd_')])
# if curl URL | python2 then use command setup
if len(sys.argv) == 1 and __file__ == '<stdin>':
cmd_setup()
elif len(sys.argv) == 2 and sys.argv[1] in cmds:
cmds[sys.argv[1]]()
else:
import openerp
openerp.cli.main()
if __name__ == "__main__":
main()
| agpl-3.0 |
yesudeep/mils-secure | app/jinja2/tests/test_lexer.py | 6 | 2087 | # -*- coding: utf-8 -*-
"""
unit test for the lexer
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2009 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import Environment
env = Environment()
RAW = '{% raw %}foo{% endraw %}|{%raw%}{{ bar }}|{% baz %}{% endraw %}'
BALANCING = '''{% for item in seq %}${{'foo': item}|upper}{% endfor %}'''
COMMENTS = '''\
<ul>
<!--- for item in seq -->
<li>{item}</li>
<!--- endfor -->
</ul>'''
BYTEFALLBACK = u'''{{ 'foo'|pprint }}|{{ 'bär'|pprint }}'''
def test_raw():
tmpl = env.from_string(RAW)
assert tmpl.render() == 'foo|{{ bar }}|{% baz %}'
def test_balancing():
from jinja2 import Environment
env = Environment('{%', '%}', '${', '}')
tmpl = env.from_string(BALANCING)
assert tmpl.render(seq=range(3)) == "{'FOO': 0}{'FOO': 1}{'FOO': 2}"
def test_comments():
from jinja2 import Environment
env = Environment('<!--', '-->', '{', '}')
tmpl = env.from_string(COMMENTS)
assert tmpl.render(seq=range(3)) == ("<ul>\n <li>0</li>\n "
"<li>1</li>\n <li>2</li>\n</ul>")
def test_string_escapes():
for char in u'\0', u'\u2668', u'\xe4', u'\t', u'\r', u'\n':
tmpl = env.from_string('{{ %s }}' % repr(char)[1:])
assert tmpl.render() == char
assert env.from_string('{{ "\N{HOT SPRINGS}" }}').render() == u'\u2668'
def test_bytefallback():
tmpl = env.from_string(BYTEFALLBACK)
assert tmpl.render() == u"'foo'|u'b\\xe4r'"
def test_operators():
from jinja2.lexer import operators
for test, expect in operators.iteritems():
if test in '([{}])':
continue
stream = env.lexer.tokenize('{{ %s }}' % test)
stream.next()
assert stream.current.type == expect
def test_normalizing():
from jinja2 import Environment
for seq in '\r', '\r\n', '\n':
env = Environment(newline_sequence=seq)
tmpl = env.from_string('1\n2\r\n3\n4\n')
result = tmpl.render()
assert result.replace(seq, 'X') == '1X2X3X4'
| mit |
amifsud/sot-stabilizer | src/dynamic_graph/sot/application/stabilizer/scenarii/pg_lqr_twoDof_coupled_stabilizer_hrp2_encoders.py | 1 | 2099 | from dynamic_graph.sot.application.stabilizer.scenarii.pg_lqr_twoDof_coupled_stabilizer import PgLqrTwoDofCoupledStabilizer
from dynamic_graph.sot.application.stabilizer.scenarii.hrp2_lqr_twoDof_coupled_stabilizer_encoders import HRP2LqrTwoDofCoupledStabilizerEncoders
from dynamic_graph.sot.core.meta_tasks import GainAdaptive
from dynamic_graph import plug
from dynamic_graph.sot.core.matrix_util import matrixToTuple
from dynamic_graph.sot.core import MatrixToUTheta, HomoToMatrix, HomoToRotation, Multiply_matrix_vector
from numpy import diag
class PgLqrTwoDofCoupledStabilizerHRP2Encoders(PgLqrTwoDofCoupledStabilizer):
def __init__(self,robot,trunkStabilize = False, hands = False, posture =False):
PgLqrTwoDofCoupledStabilizer.__init__(self,robot,trunkStabilize,hands,posture)
def createStabilizedCoMTask (self):
task = HRP2LqrTwoDofCoupledStabilizerEncoders(self.robot)
gain = GainAdaptive('gain'+task.name)
plug(self.comRef,task.comRef)
task.waistOriRef.value=(0,)*3
task.flexOriRef.value=(0,)*3
task.comDotRef.value=(0,)*3
task.waistVelRef.value=(0,)*3
task.flexAngVelRef.value=(0,)*3
plug(gain.gain, task.controlGain)
plug(task.error, gain.error)
return (task, gain)
def initTaskPosture(self):
# --- LEAST NORM
weight_ff = 0
weight_leg = 3
weight_knee = 5
weight_chest = 1
weight_chesttilt = 10
weight_head = 0.3
weight_arm = 1
#weight = diag( (weight_ff,)*6 + (weight_leg,)*12 + (weight_chest,)*2 + (weight_head,)*2 + (weight_arm,)*14)
#weight[9,9] = weight_knee
#weight[15,15] = weight_knee
#weight[19,19] = weight_chesttilt
weight = diag( (0,)*6+(1,)*30)
#weight = weight[6:,:]
self.featurePosture.jacobianIN.value = matrixToTuple(weight)
self.featurePostureDes.errorIN.value = self.robot.halfSitting
#mask = '1'*36
#mask = '1'*14+'0'*22
#self.tasks['posture'].controlSelec.value = mask
| lgpl-3.0 |
tomellericcardo/SafeChat | server-side/safeBase.py | 1 | 3601 | # -*- coding: utf-8 -*-
from os.path import realpath, dirname, join
from re import compile
from sqlite3 import connect
class SafeBase:
def __init__(self, g, database_filename):
self.g = g
posizione = dirname(realpath(__file__))
self.percorso = join(posizione, database_filename)
self.init_db()
def init_db(self):
database = connect(self.percorso)
cursore = database.cursor()
cursore.execute('''
CREATE TABLE IF NOT EXISTS utente (
username TEXT PRIMARY KEY,
password TEXT NOT NULL,
chiave TEXT NOT NULL,
sale TEXT NOT NULL
)
''')
database.commit()
cursore.execute('''
CREATE TABLE IF NOT EXISTS profilo (
username TEXT PRIMARY KEY,
nome TEXT,
cognome TEXT,
stato TEXT,
foto TEXT
)
''')
database.commit()
cursore.execute('''
CREATE TABLE IF NOT EXISTS messaggio (
chiave INTEGER PRIMARY KEY AUTOINCREMENT,
proprietario TEXT NOT NULL,
partecipante TEXT NOT NULL,
mittente TEXT NOT NULL,
immagine INT DEFAULT 0,
testo TEXT NOT NULL,
data_ora DATETIME DEFAULT CURRENT_TIMESTAMP,
letto INT DEFAULT 0
)
''')
database.commit()
cursore.execute('''
CREATE VIEW IF NOT EXISTS ultimo_messaggio AS
SELECT m.proprietario, m.mittente, m.partecipante, m.testo, m.immagine, m.data_ora, m.letto
FROM messaggio m
INNER JOIN (
SELECT proprietario, partecipante, MAX(data_ora) AS data_ora
FROM messaggio
GROUP BY proprietario, partecipante
) u
ON u.proprietario = m.proprietario
AND u.partecipante = m.partecipante
AND u.data_ora = m.data_ora
''')
database.commit()
cursore.execute('''
CREATE VIEW IF NOT EXISTS non_letti AS
SELECT proprietario, partecipante,
SUM(CASE letto WHEN 0 THEN 1 ELSE 0 END) AS non_letti
FROM messaggio
GROUP BY proprietario, partecipante
''')
database.commit()
cursore.close()
database.close()
def apri_connessione(self):
self.g.db = connect(self.percorso)
self.g.db.text_factory = str
self.g.db.create_function('REGEXP', 2, self.regexp)
def chiudi_connessione(self):
db = getattr(self.g, 'db', None)
if db is not None:
db.close()
def regexp(self, espressione, oggetto):
reg = compile(espressione)
return reg.search(oggetto) is not None
def leggi_righe(self, query, parametri):
cursore = self.g.db.cursor()
cursore.execute(query, parametri)
risultato = cursore.fetchall()
cursore.close()
return risultato
def leggi_riga(self, query, parametri):
cursore = self.g.db.cursor()
cursore.execute(query, parametri)
risultato = cursore.fetchone()
cursore.close()
return risultato
def leggi_dato(self, query, parametri):
return self.leggi_riga(query, parametri)[0]
def scrivi(self, query, parametri):
cursore = self.g.db.cursor()
cursore.execute(query, parametri)
self.g.db.commit()
cursore.close()
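    # Illustrative usage sketch (not part of the original class), assuming
    # `safe` is a SafeBase instance and apri_connessione() has been called for
    # the current request; the pattern is a placeholder. The REGEXP function
    # registered above enables queries such as:
    #
    #   righe = safe.leggi_righe(
    #       'SELECT username FROM profilo WHERE nome REGEXP ?', ('^Mar',))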
| apache-2.0 |
VinGarcia/kivy | examples/kinect/kinectviewer.py | 56 | 7964 | import freenect
from time import sleep
from threading import Thread
from collections import deque
from kivy.app import App
from kivy.clock import Clock
from kivy.properties import NumericProperty, StringProperty
from kivy.graphics import RenderContext, Color, Rectangle
from kivy.graphics.texture import Texture
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.uix.slider import Slider
from kivy.uix.boxlayout import BoxLayout
fragment_header = '''
#ifdef GL_ES
precision highp float;
#endif
/* Outputs from the vertex shader */
varying vec4 frag_color;
varying vec2 tex_coord0;
/* uniform texture samplers */
uniform sampler2D texture0;
/* custom input */
uniform float depth_range;
uniform vec2 size;
'''
hsv_func = '''
vec3 HSVtoRGB(vec3 color) {
float f,p,q,t, hueRound;
int hueIndex;
float hue, saturation, v;
vec3 result;
/* just for clarity */
hue = color.r;
saturation = color.g;
v = color.b;
hueRound = floor(hue * 6.0);
hueIndex = mod(int(hueRound), 6.);
f = (hue * 6.0) - hueRound;
p = v * (1.0 - saturation);
q = v * (1.0 - f*saturation);
t = v * (1.0 - (1.0 - f)*saturation);
switch(hueIndex) {
case 0:
result = vec3(v,t,p);
break;
case 1:
result = vec3(q,v,p);
break;
case 2:
result = vec3(p,v,t);
break;
case 3:
result = vec3(p,q,v);
break;
case 4:
result = vec3(t,p,v);
break;
case 5:
result = vec3(v,p,q);
break;
}
return result;
}
'''
rgb_kinect = fragment_header + '''
void main (void) {
float value = texture2D(texture0, tex_coord0).r;
value = mod(value * depth_range, 1.);
vec3 col = vec3(0., 0., 0.);
if ( value <= 0.33 )
col.r = clamp(value, 0., 0.33) * 3.;
if ( value <= 0.66 )
col.g = clamp(value - 0.33, 0., 0.33) * 3.;
col.b = clamp(value - 0.66, 0., 0.33) * 3.;
gl_FragColor = vec4(col, 1.);
}
'''
points_kinect = fragment_header + hsv_func + '''
void main (void) {
// threshold used to reduce the depth (better result)
const int th = 5;
// size of a square
int square = floor(depth_range);
// number of square on the display
vec2 count = size / square;
// current position of the square
vec2 pos = floor(tex_coord0.xy * count) / count;
// texture step to pass to another square
vec2 step = 1 / count;
// texture step to pass to another pixel
vec2 pxstep = 1 / size;
// center of the square
vec2 center = pos + step / 2.;
// calculate average of every pixels in the square
float s = 0, x, y;
for (x = 0; x < square; x++) {
for (y = 0; y < square; y++) {
s += texture2D(texture0, pos + pxstep * vec2(x,y)).r;
}
}
float v = s / (square * square);
// threshold the value
float dr = th / 10.;
v = min(v, dr) / dr;
// calculate the distance between the center of the square and current pixel
// display the pixel only if the distance is inside the circle
float vdist = length(abs(tex_coord0 - center) * size / square);
float value = 1 - v;
if ( vdist < value ) {
vec3 col = HSVtoRGB(vec3(value, 1., 1.));
gl_FragColor = vec4(col, 1);
}
}
'''
hsv_kinect = fragment_header + hsv_func + '''
void main (void) {
float value = texture2D(texture0, tex_coord0).r;
value = mod(value * depth_range, 1.);
vec3 col = HSVtoRGB(vec3(value, 1., 1.));
gl_FragColor = vec4(col, 1.);
}
'''
class KinectDepth(Thread):
def __init__(self, *largs, **kwargs):
super(KinectDepth, self).__init__(*largs, **kwargs)
self.daemon = True
self.queue = deque()
self.quit = False
self.index = 0
def run(self):
q = self.queue
while not self.quit:
depths = freenect.sync_get_depth(index=self.index)
if depths is None:
sleep(2)
continue
q.appendleft(depths)
def pop(self):
return self.queue.pop()
class KinectViewer(Widget):
depth_range = NumericProperty(7.7)
shader = StringProperty("rgb")
index = NumericProperty(0)
def __init__(self, **kwargs):
# change the default canvas to RenderContext, we can change the shader
self.canvas = RenderContext()
self.canvas.shader.fs = hsv_kinect
# add kinect depth provider, and start the thread
self.kinect = KinectDepth()
self.kinect.start()
# parent init
super(KinectViewer, self).__init__(**kwargs)
# allocate texture for pushing depth
self.texture = Texture.create(
size=(640, 480), colorfmt='luminance', bufferfmt='ushort')
self.texture.flip_vertical()
# create default canvas element
with self.canvas:
Color(1, 1, 1)
Rectangle(size=Window.size, texture=self.texture)
# add a little clock to update our glsl
Clock.schedule_interval(self.update_transformation, 0)
def on_index(self, instance, value):
self.kinect.index = value
def on_shader(self, instance, value):
if value == 'rgb':
self.canvas.shader.fs = rgb_kinect
elif value == 'hsv':
self.canvas.shader.fs = hsv_kinect
elif value == 'points':
self.canvas.shader.fs = points_kinect
def update_transformation(self, *largs):
# update projection mat and uvsize
self.canvas['projection_mat'] = Window.render_context['projection_mat']
self.canvas['depth_range'] = self.depth_range
self.canvas['size'] = list(map(float, self.size))
try:
value = self.kinect.pop()
except:
return
f = value[0].astype('ushort') * 32
self.texture.blit_buffer(
f.tostring(), colorfmt='luminance', bufferfmt='ushort')
self.canvas.ask_update()
class KinectViewerApp(App):
def build(self):
root = BoxLayout(orientation='vertical')
self.viewer = viewer = KinectViewer(
index=self.config.getint('kinect', 'index'),
shader=self.config.get('shader', 'theme'))
root.add_widget(viewer)
toolbar = BoxLayout(size_hint=(1, None), height=50)
root.add_widget(toolbar)
slider = Slider(min=1., max=32., value=1.)
def update_depth_range(instance, value):
viewer.depth_range = value
slider.bind(value=update_depth_range)
toolbar.add_widget(slider)
return root
def build_config(self, config):
config.add_section('kinect')
config.set('kinect', 'index', '0')
config.add_section('shader')
config.set('shader', 'theme', 'rgb')
def build_settings(self, settings):
settings.add_json_panel('Kinect Viewer', self.config, data='''[
{ "type": "title", "title": "Kinect" },
{ "type": "numeric", "title": "Index",
"desc": "Kinect index, from 0 to X",
"section": "kinect", "key": "index" },
{ "type": "title", "title": "Shaders" },
{ "type": "options", "title": "Theme",
"desc": "Shader to use for a specific visualization",
"section": "shader", "key": "theme",
"options": ["rgb", "hsv", "points"]}
]''')
def on_config_change(self, config, section, key, value):
if config is not self.config:
return
token = (section, key)
if token == ('kinect', 'index'):
self.viewer.index = int(value)
elif token == ('shader', 'theme'):
if value == 'rgb':
self.viewer.canvas.shader.fs = rgb_kinect
elif value == 'hsv':
self.viewer.shader = value
if __name__ == '__main__':
KinectViewerApp().run()
| mit |
knightsamar/github-edu-effectiveness | store.py | 1 | 1233 | #builds a db store using the Github data
import github
from pymongo import MongoClient
from settings import MONGO_HOST, MONGO_PORT, MONGO_DB
'''
Why do we store the data when we can get it from GitHub API anytime we need?
1. Because the GitHub API seems to return only data up to 90 days old for things like Events.
   We often need data older than that.
2. Querying the GitHub API for specific things often isn't possible. Say, get all Events whose type is CommitCommentEvent.
   It is easier to do that using a local db store like MongoDB.
'''
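# For example (an illustrative sketch, assuming the 'events' collection that
# store_events_for_user() below fills in):
#
#   db = connect()
#   commit_comments = db.events.find({'type': 'CommitCommentEvent'})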
def connect(dbname=MONGO_DB):
'''Connects and returns a Database object for use'''
c = MongoClient(host=MONGO_HOST, port=MONGO_PORT)
db = c[dbname]
return db
def store_events_for_user(u, collection='events'):
'''
Gets and stores all the events for a given user inside given MongoDB collection
'''
print "Storing events for user", u.login
events_collection = connect()[collection]
events_from_github = u.get_events()
print type(events_from_github)
for e in events_from_github:
print "Storing event", e
x = events_collection.insert_one(e._rawData)
print "Stored at ", x.inserted_id
del(x)
| gpl-2.0 |
nkolban/Espruino | boards/STM32F429IDISCOVERY.py | 6 | 5414 | #!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
info = {
'name' : "STM32 F429 Discovery",
'link' : [ "http://www.st.com/web/catalog/tools/FM116/SC959/SS1532/LN1199/PF259090" ],
'default_console' : "EV_SERIAL1",
'variables' : 5450,
'binary_name' : 'espruino_%v_stm32f429idiscovery.bin',
'build' : {
'defines' : [
'USE_GRAPHICS',
'USE_NET',
]
}
};
chip = {
'part' : "STM32F429ZIT6",
'family' : "STM32F4",
'package' : "LQFP144",
'ram' : 128,#256,
'flash' : 512, #2048,
'speed' : 168,
'usart' : 6,
'spi' : 3,
'i2c' : 3,
'adc' : 3,
'dac' : 2,
};
devices = {
'OSC' : { 'pin_1' : 'H0',
'pin_2' : 'H1' },
'OSC_RTC' : { 'pin_1' : 'C14',
'pin_2' : 'C15' },
'LED1' : { 'pin' : 'G13' }, # green
'LED2' : { 'pin' : 'G14' }, # red
'BTN1' : { 'pin' : 'A0' },
'USB' : { 'pin_dm' : 'B14',
'pin_dp' : 'B15',
'pin_vbus' : 'B13',
'pin_id' : 'B12',
'pin_pso' : 'C4', # Power supply enable
'pin_oc' : 'C5', # Overcurrent
},
'MEMS' : { 'device' : 'L3GD20',
'pin_cs' : 'C1',
'pin_int1' : 'A1',
'pin_int2' : 'A2',
'pin_mosi' : 'F9',
'pin_miso' : 'F8',
'pin_sck' : 'F7' },
'TOUCHSCREEN' : {
'pin_irq' : 'A15',
'pin_cs' : '',
'pin_scl' : 'A8',
'pin_sda' : 'C9',
},
'LCD' : {
'width' : 320, 'height' : 240, 'bpp' : 16, 'controller' : 'fsmc', 'controller2' : 'ili9341',
'pin_d0' : 'D6',
'pin_d1' : 'G11',
'pin_d2' : 'G12',
'pin_d3' : 'A3',
'pin_d4' : 'B8',
'pin_d5' : 'B9',
'pin_d6' : 'A6',
'pin_d7' : 'G10',
'pin_d8' : 'B10',
'pin_d9' : 'B11',
'pin_d10' : 'C7',
'pin_d11' : 'D3',
'pin_d12' : 'C10',
'pin_d13' : 'B0',
'pin_d14' : 'A11',
'pin_d15' : 'A12',
'pin_d16' : 'B1',
            'pin_d17' : 'G6',
'pin_rd' : 'D12', # RDX
'pin_wr' : 'D13',# WRQ (or SPI DC - data=1/command=0)
'pin_cs' : 'C2', # SPI CS (enable=0)
'pin_en' : 'F10',
'pin_vsync' : 'A4',
'pin_hsync' : 'C6',
'pin_dotlck' : 'G7',
'pin_dc' : 'F7', # SPI CLK
'pin_sda' : 'F9', # SPI SDI/SDO
'pin_im0' : 'D2', # solder bridge normally open, pulled to 0
'pin_im1' : 'D4', # solder bridge normally open, pulled to 1
'pin_im2' : 'D5', # solder bridge normally open, pulled to 1
'pin_im3' : 'D7', # solder bridge normally open, pulled to 0
},
'SDRAM' : {
'pin_sdcke1' : 'B5',
'pin_sdne1' : 'B6',
'pin_sdnwe' : 'C0',
'pin_d2' : 'D0',
'pin_d3' : 'D1',
'pin_d13' : 'D8',
'pin_d14' : 'D9',
'pin_d15' : 'D10',
'pin_d0' : 'D14',
'pin_d1' : 'D15',
'pin_nbl0' : 'E0',
'pin_nbl1' : 'E1',
'pin_d4' : 'E7',
'pin_d5' : 'E8',
'pin_d6' : 'E9',
'pin_d7' : 'E10',
'pin_d8' : 'E11',
'pin_d9' : 'E12',
'pin_d10' : 'E13',
'pin_d11' : 'E14',
'pin_d12' : 'E15',
'pin_a0' : 'F0',
'pin_a1' : 'F1',
'pin_a2' : 'F2',
'pin_a3' : 'F3',
'pin_a4' : 'F4',
'pin_a5' : 'F5',
'pin_sdnras' : 'F11',
'pin_a6' : 'F12',
'pin_a7' : 'F13',
'pin_a8' : 'F14',
'pin_a9' : 'F15',
'pin_a10' : 'G0',
'pin_a11' : 'G1',
'pin_ba0' : 'G4',
'pin_ba1' : 'G5',
'pin_sdclk' : 'G8',
'pin_sdncas' : 'G15',
},
'JTAG' : {
'pin_MS' : 'A13',
'pin_CK' : 'A14',
'pin_DI' : 'A15'
},
};
# left-right, or top-bottom order
board = {
'left' : [ ], # fixme
'left2' : [ ],
'right2' : [ ],
'right' : [ ],
};
board["_css"] = """
#board {
width: 680px;
height: 1020px;
left: 200px;
background-image: url(img/STM32F429IDISCOVERY.jpg);
}
#boardcontainer {
height: 1020px;
}
#left {
top: 375px;
right: 590px;
}
#left2 {
top: 375px;
left: 105px;
}
#right {
top: 375px;
left: 550px;
}
#right2 {
top: 375px;
right: 145px;
}
""";
def get_pins():
pins = pinutils.scan_pin_file([], 'stm32f40x.csv', 6, 9, 10)
return pinutils.only_from_package(pinutils.fill_gaps_in_pin_list(pins), chip["package"])
| mpl-2.0 |
ihsanudin/odoo | openerp/tools/float_utils.py | 312 | 10296 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
def _float_check_precision(precision_digits=None, precision_rounding=None):
assert (precision_digits is not None or precision_rounding is not None) and \
not (precision_digits and precision_rounding),\
"exactly one of precision_digits and precision_rounding must be specified"
if precision_digits is not None:
return 10 ** -precision_digits
return precision_rounding
def float_round(value, precision_digits=None, precision_rounding=None, rounding_method='HALF-UP'):
"""Return ``value`` rounded to ``precision_digits`` decimal digits,
minimizing IEEE-754 floating point representation errors, and applying
the tie-breaking rule selected with ``rounding_method``, by default
HALF-UP (away from zero).
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
:param float value: the value to round
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
    :param rounding_method: the rounding method used: 'HALF-UP' or 'UP'. The first
        one rounds to the closest number, with ties (fractional part >= 0.5)
        rounded away from zero; the latter always rounds away from zero (ceiling).
:return: rounded float
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
if rounding_factor == 0 or value == 0: return 0.0
# NORMALIZE - ROUND - DENORMALIZE
# In order to easily support rounding to arbitrary 'steps' (e.g. coin values),
# we normalize the value before rounding it as an integer, and de-normalize
# after rounding: e.g. float_round(1.3, precision_rounding=.5) == 1.5
# TIE-BREAKING: HALF-UP (for normal rounding)
# We want to apply HALF-UP tie-breaking rules, i.e. 0.5 rounds away from 0.
    # Due to IEEE 754 float/double representation limits, the approximation of the
# real value may be slightly below the tie limit, resulting in an error of
# 1 unit in the last place (ulp) after rounding.
# For example 2.675 == 2.6749999999999998.
# To correct this, we add a very small epsilon value, scaled to the
    # order of magnitude of the value, to tip the tie-break in the right
# direction.
# Credit: discussion with OpenERP community members on bug 882036
normalized_value = value / rounding_factor # normalize
epsilon_magnitude = math.log(abs(normalized_value), 2)
epsilon = 2**(epsilon_magnitude-53)
if rounding_method == 'HALF-UP':
normalized_value += cmp(normalized_value,0) * epsilon
rounded_value = round(normalized_value) # round to integer
# TIE-BREAKING: UP (for ceiling operations)
# When rounding the value up, we instead subtract the epsilon value
    # as the approximation of the real value may be slightly *above* the
    # tie limit; this would result in incorrectly rounding up to the next number
# The math.ceil operation is applied on the absolute value in order to
# round "away from zero" and not "towards infinity", then the sign is
# restored.
elif rounding_method == 'UP':
sign = cmp(normalized_value, 0)
normalized_value -= sign*epsilon
rounded_value = math.ceil(abs(normalized_value))*sign # ceil to integer
result = rounded_value * rounding_factor # de-normalize
return result
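# Illustrative values (a sketch restating the comments above and the self-test at
# the bottom of this module, not new behaviour):
#   float_round(2.675, precision_digits=2)     -> 2.68  (the epsilon corrects 2.6749999999999998)
#   float_round(1.3, precision_rounding=.5)    -> 1.5   (rounding to arbitrary steps)
#   float_round(0.0111, precision_digits=2,
#               rounding_method='UP')          -> 0.02  (always rounds away from zero)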
def float_is_zero(value, precision_digits=None, precision_rounding=None):
"""Returns true if ``value`` is small enough to be treated as
zero at the given precision (smaller than the corresponding *epsilon*).
The precision (``10**-precision_digits`` or ``precision_rounding``)
is used as the zero *epsilon*: values less than that are considered
to be zero.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value: value to compare with the precision's zero
:return: True if ``value`` is considered zero
"""
epsilon = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
return abs(float_round(value, precision_rounding=epsilon)) < epsilon
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
"""Compare ``value1`` and ``value2`` after rounding them according to the
given precision. A value is considered lower/greater than another value
if their rounded value is different. This is not the same as having a
non-zero difference!
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Example: 1.432 and 1.431 are equal at 2 digits precision,
so this method would return 0
However 0.006 and 0.002 are considered different (this method returns 1)
because they respectively round to 0.01 and 0.0, even though
0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value1: first value to compare
:param float value2: second value to compare
:return: (resp.) -1, 0 or 1, if ``value1`` is (resp.) lower than,
equal to, or greater than ``value2``, at the given precision.
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
value1 = float_round(value1, precision_rounding=rounding_factor)
value2 = float_round(value2, precision_rounding=rounding_factor)
delta = value1 - value2
if float_is_zero(delta, precision_rounding=rounding_factor): return 0
return -1 if delta < 0.0 else 1
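# Worked example of the warning above (a sketch using only functions from this module):
#   float_compare(0.006, 0.002, precision_digits=2)    # -> 1    (they round to 0.01 and 0.00)
#   float_is_zero(0.006 - 0.002, precision_digits=2)   # -> True (the difference 0.004 rounds to 0.00)
# i.e. rounding before or after taking the difference can give different answers.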
def float_repr(value, precision_digits):
"""Returns a string representation of a float with the
the given number of fractional digits. This should not be
used to perform a rounding operation (this is done via
:meth:`~.float_round`), but only to produce a suitable
string representation for a float.
:param int precision_digits: number of fractional digits to
include in the output
"""
    # Can't use str() here because it seems to have an intrinsic
# rounding to 12 significant digits, which causes a loss of
# precision. e.g. str(123456789.1234) == str(123456789.123)!!
return ("%%.%sf" % precision_digits) % value
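# For instance (illustrative, matching the self-test below):
#   float_repr(float_round(0.675, precision_digits=2), 2)   # -> '0.68'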
if __name__ == "__main__":
import time
start = time.time()
count = 0
errors = 0
def try_round(amount, expected, precision_digits=3):
global count, errors; count += 1
result = float_repr(float_round(amount, precision_digits=precision_digits),
precision_digits=precision_digits)
if result != expected:
errors += 1
print '###!!! Rounding error: got %s , expected %s' % (result, expected)
# Extended float range test, inspired by Cloves Almeida's test on bug #882036.
fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
expecteds = ['.00', '.02', '.01', '.68', '.67', '.46', '.456', '.4556']
precisions = [2, 2, 2, 2, 2, 2, 3, 4]
for magnitude in range(7):
for i in xrange(len(fractions)):
frac, exp, prec = fractions[i], expecteds[i], precisions[i]
for sign in [-1,1]:
for x in xrange(0,10000,97):
n = x * 10**magnitude
f = sign * (n + frac)
f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
try_round(f, f_exp, precision_digits=prec)
stop = time.time()
# Micro-bench results:
# 47130 round calls in 0.422306060791 secs, with Python 2.6.7 on Core i3 x64
# with decimal:
# 47130 round calls in 6.612248100021 secs, with Python 2.6.7 on Core i3 x64
print count, " round calls, ", errors, "errors, done in ", (stop-start), 'secs'
| agpl-3.0 |
merfii/PythonExercises | DIYgod/0007/test/test.py | 78 | 1114 | # -*- coding: utf-8 -*-
import re
import os
# Get all files in designated path
def get_files(path):
filepath = os.listdir(path)
files = []
for fp in filepath:
fppath = path + '/' + fp
if(os.path.isfile(fppath)):
files.append(fppath)
elif(os.path.isdir(fppath)):
files += get_files(fppath)
return files
# Get the words in the designated files, sorted by frequency (most frequent first)
def get_important_word(files):
worddict = {}
for filename in files:
f = open(filename, 'rb')
s = f.read()
words = re.findall(r'[a-zA-Z0-9]+', s)
for word in words:
worddict[word] = worddict[word] + 1 if word in worddict else 1
wordsort = sorted(worddict.items(), key=lambda e:e[1], reverse=True)
return wordsort
if __name__ == '__main__':
files = get_files('.')
print files
wordsort = get_important_word(files)
maxnum = 1
for i in range(len(wordsort) - 1):
if wordsort[i][1] == wordsort[i + 1][1]:
maxnum += 1
else:
break
for i in range(maxnum):
print wordsort[i]
| mit |
drufat/vispy | vispy/visuals/line/line.py | 3 | 18295 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Line visual implementing Agg- and GL-based drawing modes.
"""
from __future__ import division
import numpy as np
from ... import gloo, glsl
from ...color import Color, ColorArray, get_colormap
from ...ext.six import string_types
from ..shaders import Function
from ..visual import Visual, CompoundVisual
from ...util.profiler import Profiler
from .dash_atlas import DashAtlas
vec2to4 = Function("""
vec4 vec2to4(vec2 inp) {
return vec4(inp, 0, 1);
}
""")
vec3to4 = Function("""
vec4 vec3to4(vec3 inp) {
return vec4(inp, 1);
}
""")
"""
TODO:
* Agg support is very minimal; needs attention.
* Optimization--avoid creating new buffers, avoid triggering program
recompile.
"""
joins = {'miter': 0, 'round': 1, 'bevel': 2}
caps = {'': 0, 'none': 0, '.': 0,
'round': 1, ')': 1, '(': 1, 'o': 1,
'triangle in': 2, '<': 2,
'triangle out': 3, '>': 3,
'square': 4, '=': 4, 'butt': 4,
'|': 5}
class LineVisual(CompoundVisual):
"""Line visual
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
Can also be a colormap name, or appropriate `Function`.
width:
The width of the line in px. Line widths > 1px are only
        guaranteed to work when using the 'agg' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* numpy arrays specify the exact set of segment pairs to
connect.
method : str
Mode to use for drawing.
* "agg" uses anti-grain geometry to draw nicely antialiased lines
with proper joins and endcaps.
* "gl" uses OpenGL's built-in line rendering. This is much faster,
but produces much lower-quality results and is not guaranteed to
obey the requested line width or join/endcap styles.
antialias : bool
Enables or disables antialiasing.
For method='gl', this specifies whether to use GL's line smoothing,
which may be unavailable or inconsistent on some platforms.
"""
def __init__(self, pos=None, color=(0.5, 0.5, 0.5, 1), width=1,
connect='strip', method='gl', antialias=False):
self._line_visual = None
self._changed = {'pos': False, 'color': False, 'width': False,
'connect': False}
self._pos = None
self._color = None
self._width = None
self._connect = None
self._bounds = None
self._antialias = None
self._method = 'none'
CompoundVisual.__init__(self, [])
# don't call subclass set_data; these often have different
# signatures.
LineVisual.set_data(self, pos=pos, color=color, width=width,
connect=connect)
self.antialias = antialias
self.method = method
@property
def antialias(self):
return self._antialias
@antialias.setter
def antialias(self, aa):
self._antialias = bool(aa)
self.update()
@property
def method(self):
"""The current drawing method"""
return self._method
@method.setter
def method(self, method):
if method not in ('agg', 'gl'):
raise ValueError('method argument must be "agg" or "gl".')
if method == self._method:
return
self._method = method
if self._line_visual is not None:
self.remove_subvisual(self._line_visual)
if method == 'gl':
self._line_visual = _GLLineVisual(self)
elif method == 'agg':
self._line_visual = _AggLineVisual(self)
self.add_subvisual(self._line_visual)
for k in self._changed:
self._changed[k] = True
def set_data(self, pos=None, color=None, width=None, connect=None):
""" Set the data used to draw this visual.
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
width:
The width of the line in px. Line widths < 1 px will be rounded up
to 1 px when using the 'gl' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* int numpy arrays specify the exact set of segment pairs to
connect.
* bool numpy arrays specify which _adjacent_ pairs to connect.
"""
if pos is not None:
self._bounds = None
self._pos = pos
self._changed['pos'] = True
if color is not None:
self._color = color
self._changed['color'] = True
if width is not None:
self._width = width
self._changed['width'] = True
if connect is not None:
self._connect = connect
self._changed['connect'] = True
self.update()
@property
def color(self):
return self._color
@property
def width(self):
return self._width
@property
def connect(self):
return self._connect
@property
def pos(self):
return self._pos
def _interpret_connect(self):
if isinstance(self._connect, np.ndarray):
# Convert a boolean connection array to a vertex index array
if self._connect.ndim == 1 and self._connect.dtype == bool:
index = np.empty((len(self._connect), 2), dtype=np.uint32)
index[:] = np.arange(len(self._connect))[:, np.newaxis]
index[:, 1] += 1
return index[self._connect]
elif self._connect.ndim == 2 and self._connect.shape[1] == 2:
return self._connect.astype(np.uint32)
else:
raise TypeError("Got invalid connect array of shape %r and "
"dtype %r" % (self._connect.shape,
self._connect.dtype))
else:
return self._connect
def _interpret_color(self):
if isinstance(self._color, string_types):
try:
colormap = get_colormap(self._color)
color = Function(colormap.glsl_map)
except KeyError:
color = Color(self._color).rgba
elif isinstance(self._color, Function):
color = Function(self._color)
else:
color = ColorArray(self._color).rgba
if len(color) == 1:
color = color[0]
return color
def _compute_bounds(self, axis, view):
"""Get the bounds
Parameters
----------
mode : str
Describes the type of boundary requested. Can be "visual", "data",
or "mouse".
axis : 0, 1, 2
The axis along which to measure the bounding values, in
x-y-z order.
"""
# Can and should we calculate bounds?
if (self._bounds is None) and self._pos is not None:
pos = self._pos
self._bounds = [(pos[:, d].min(), pos[:, d].max())
for d in range(pos.shape[1])]
# Return what we can
if self._bounds is None:
return
else:
if axis < len(self._bounds):
return self._bounds[axis]
else:
return (0, 0)
def _prepare_draw(self, view):
if self._width == 0:
return False
CompoundVisual._prepare_draw(self, view)
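# Illustrative usage (a sketch; the random data and the scene the visual would be
# attached to are assumptions, not part of this module):
#
#   import numpy as np
#   pos = np.random.normal(size=(100, 2), scale=100).astype(np.float32)
#   colors = np.random.rand(100, 4).astype(np.float32)   # one rgba color per vertex
#   line = LineVisual(pos=pos, color=colors, width=2, connect='strip', method='gl')
#   line.set_data(width=3)                                # attributes can be updated later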
class _GLLineVisual(Visual):
VERTEX_SHADER = """
varying vec4 v_color;
void main(void) {
gl_Position = $transform($to_vec4($position));
v_color = $color;
}
"""
FRAGMENT_SHADER = """
varying vec4 v_color;
void main() {
gl_FragColor = v_color;
}
"""
def __init__(self, parent):
self._parent = parent
self._pos_vbo = gloo.VertexBuffer()
self._color_vbo = gloo.VertexBuffer()
self._connect_ibo = gloo.IndexBuffer()
self._connect = None
Visual.__init__(self, vcode=self.VERTEX_SHADER,
fcode=self.FRAGMENT_SHADER)
self.set_gl_state('translucent')
def _prepare_transforms(self, view):
xform = view.transforms.get_transform()
view.view_program.vert['transform'] = xform
def _prepare_draw(self, view):
prof = Profiler()
if self._parent._changed['pos']:
if self._parent._pos is None:
return False
# todo: does this result in unnecessary copies?
pos = np.ascontiguousarray(self._parent._pos.astype(np.float32))
self._pos_vbo.set_data(pos)
self._program.vert['position'] = self._pos_vbo
if pos.shape[-1] == 2:
self._program.vert['to_vec4'] = vec2to4
elif pos.shape[-1] == 3:
self._program.vert['to_vec4'] = vec3to4
else:
raise TypeError("Got bad position array shape: %r"
% (pos.shape,))
if self._parent._changed['color']:
color = self._parent._interpret_color()
# If color is not visible, just quit now
if isinstance(color, Color) and color.is_blank:
return False
if isinstance(color, Function):
# TODO: Change to the parametric coordinate once that is done
self._program.vert['color'] = color(
'(gl_Position.x + 1.0) / 2.0')
else:
if color.ndim == 1:
self._program.vert['color'] = color
else:
self._color_vbo.set_data(color)
self._program.vert['color'] = self._color_vbo
# Do we want to use OpenGL, and can we?
GL = None
from ...app._default_app import default_app
if default_app is not None and \
default_app.backend_name != 'ipynb_webgl':
try:
import OpenGL.GL as GL
except Exception: # can be other than ImportError sometimes
pass
# Turn on line smooth and/or line width
if GL:
if self._parent._antialias:
GL.glEnable(GL.GL_LINE_SMOOTH)
else:
GL.glDisable(GL.GL_LINE_SMOOTH)
px_scale = self.transforms.pixel_scale
width = px_scale * self._parent._width
GL.glLineWidth(max(width, 1.))
if self._parent._changed['connect']:
self._connect = self._parent._interpret_connect()
if isinstance(self._connect, np.ndarray):
self._connect_ibo.set_data(self._connect)
if self._connect is None:
return False
prof('prepare')
# Draw
if isinstance(self._connect, string_types) and \
self._connect == 'strip':
self._draw_mode = 'line_strip'
self._index_buffer = None
elif isinstance(self._connect, string_types) and \
self._connect == 'segments':
self._draw_mode = 'lines'
self._index_buffer = None
elif isinstance(self._connect, np.ndarray):
self._draw_mode = 'lines'
self._index_buffer = self._connect_ibo
else:
raise ValueError("Invalid line connect mode: %r" % self._connect)
prof('draw')
class _AggLineVisual(Visual):
_agg_vtype = np.dtype([('a_position', np.float32, 2),
('a_tangents', np.float32, 4),
('a_segment', np.float32, 2),
('a_angles', np.float32, 2),
('a_texcoord', np.float32, 2),
('alength', np.float32, 1),
('color', np.float32, 4)])
VERTEX_SHADER = glsl.get('lines/agg.vert')
FRAGMENT_SHADER = glsl.get('lines/agg.frag')
def __init__(self, parent):
self._parent = parent
self._vbo = gloo.VertexBuffer()
self._pos = None
self._color = None
self._da = DashAtlas()
dash_index, dash_period = self._da['solid']
self._U = dict(dash_index=dash_index, dash_period=dash_period,
linejoin=joins['round'],
linecaps=(caps['round'], caps['round']),
dash_caps=(caps['round'], caps['round']),
antialias=1.0)
self._dash_atlas = gloo.Texture2D(self._da._data)
Visual.__init__(self, vcode=self.VERTEX_SHADER,
fcode=self.FRAGMENT_SHADER)
self._index_buffer = gloo.IndexBuffer()
self.set_gl_state('translucent', depth_test=False)
self._draw_mode = 'triangles'
def _prepare_transforms(self, view):
data_doc = view.get_transform('visual', 'document')
doc_px = view.get_transform('document', 'framebuffer')
px_ndc = view.get_transform('framebuffer', 'render')
vert = view.view_program.vert
vert['transform'] = data_doc
vert['doc_px_transform'] = doc_px
vert['px_ndc_transform'] = px_ndc
def _prepare_draw(self, view):
bake = False
if self._parent._changed['pos']:
if self._parent._pos is None:
return False
# todo: does this result in unnecessary copies?
self._pos = np.ascontiguousarray(
self._parent._pos.astype(np.float32))
bake = True
if self._parent._changed['color']:
self._color = self._parent._interpret_color()
bake = True
if self._parent._changed['connect']:
if self._parent._connect not in [None, 'strip']:
raise NotImplementedError("Only 'strip' connection mode "
"allowed for agg-method lines.")
if bake:
V, I = self._agg_bake(self._pos, self._color)
self._vbo.set_data(V)
self._index_buffer.set_data(I)
#self._program.prepare()
self.shared_program.bind(self._vbo)
uniforms = dict(closed=False, miter_limit=4.0, dash_phase=0.0,
linewidth=self._parent._width)
for n, v in uniforms.items():
self.shared_program[n] = v
for n, v in self._U.items():
self.shared_program[n] = v
self.shared_program['u_dash_atlas'] = self._dash_atlas
@classmethod
def _agg_bake(cls, vertices, color, closed=False):
"""
Bake a list of 2D vertices for rendering them as thick line. Each line
segment must have its own vertices because of antialias (this means no
vertex sharing between two adjacent line segments).
"""
n = len(vertices)
P = np.array(vertices).reshape(n, 2).astype(float)
idx = np.arange(n) # used to eventually tile the color array
dx, dy = P[0] - P[-1]
d = np.sqrt(dx*dx+dy*dy)
# If closed, make sure first vertex = last vertex (+/- epsilon=1e-10)
if closed and d > 1e-10:
P = np.append(P, P[0]).reshape(n+1, 2)
idx = np.append(idx, idx[-1])
n += 1
V = np.zeros(len(P), dtype=cls._agg_vtype)
V['a_position'] = P
# Tangents & norms
T = P[1:] - P[:-1]
N = np.sqrt(T[:, 0]**2 + T[:, 1]**2)
# T /= N.reshape(len(T),1)
V['a_tangents'][+1:, :2] = T
V['a_tangents'][0, :2] = T[-1] if closed else T[0]
V['a_tangents'][:-1, 2:] = T
V['a_tangents'][-1, 2:] = T[0] if closed else T[-1]
# Angles
T1 = V['a_tangents'][:, :2]
T2 = V['a_tangents'][:, 2:]
A = np.arctan2(T1[:, 0]*T2[:, 1]-T1[:, 1]*T2[:, 0],
T1[:, 0]*T2[:, 0]+T1[:, 1]*T2[:, 1])
V['a_angles'][:-1, 0] = A[:-1]
V['a_angles'][:-1, 1] = A[+1:]
# Segment
L = np.cumsum(N)
V['a_segment'][+1:, 0] = L
V['a_segment'][:-1, 1] = L
# V['a_lengths'][:,2] = L[-1]
# Step 1: A -- B -- C => A -- B, B' -- C
V = np.repeat(V, 2, axis=0)[1:-1]
V['a_segment'][1:] = V['a_segment'][:-1]
V['a_angles'][1:] = V['a_angles'][:-1]
V['a_texcoord'][0::2] = -1
V['a_texcoord'][1::2] = +1
idx = np.repeat(idx, 2)[1:-1]
# Step 2: A -- B, B' -- C -> A0/A1 -- B0/B1, B'0/B'1 -- C0/C1
V = np.repeat(V, 2, axis=0)
V['a_texcoord'][0::2, 1] = -1
V['a_texcoord'][1::2, 1] = +1
idx = np.repeat(idx, 2)
I = np.resize(np.array([0, 1, 2, 1, 2, 3], dtype=np.uint32),
(n-1)*(2*3))
I += np.repeat(4*np.arange(n-1, dtype=np.uint32), 6)
# Length
V['alength'] = L[-1] * np.ones(len(V))
# Color
if color.ndim == 1:
color = np.tile(color, (len(V), 1))
elif color.ndim == 2 and len(color) == n:
color = color[idx]
else:
raise ValueError('Color length %s does not match number of '
'vertices %s' % (len(color), n))
V['color'] = color
return V, I
| bsd-3-clause |
MechaCM/android_kernel_htc_mecha | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
LuoZijun/uOffice | temp/pydocxx/docx/opc/packuri.py | 19 | 3880 | # encoding: utf-8
"""
Provides the PackURI value type along with some useful known pack URI strings
such as PACKAGE_URI.
"""
import posixpath
import re
class PackURI(str):
"""
Provides access to pack URI components such as the baseURI and the
filename slice. Behaves as |str| otherwise.
"""
_filename_re = re.compile('([a-zA-Z]+)([1-9][0-9]*)?')
def __new__(cls, pack_uri_str):
if not pack_uri_str[0] == '/':
tmpl = "PackURI must begin with slash, got '%s'"
raise ValueError(tmpl % pack_uri_str)
return str.__new__(cls, pack_uri_str)
@staticmethod
def from_rel_ref(baseURI, relative_ref):
"""
Return a |PackURI| instance containing the absolute pack URI formed by
translating *relative_ref* onto *baseURI*.
"""
joined_uri = posixpath.join(baseURI, relative_ref)
abs_uri = posixpath.abspath(joined_uri)
return PackURI(abs_uri)
@property
def baseURI(self):
"""
The base URI of this pack URI, the directory portion, roughly
speaking. E.g. ``'/ppt/slides'`` for ``'/ppt/slides/slide1.xml'``.
For the package pseudo-partname '/', baseURI is '/'.
"""
return posixpath.split(self)[0]
@property
def ext(self):
"""
The extension portion of this pack URI, e.g. ``'xml'`` for
``'/word/document.xml'``. Note the period is not included.
"""
# raw_ext is either empty string or starts with period, e.g. '.xml'
raw_ext = posixpath.splitext(self)[1]
return raw_ext[1:] if raw_ext.startswith('.') else raw_ext
@property
def filename(self):
"""
The "filename" portion of this pack URI, e.g. ``'slide1.xml'`` for
``'/ppt/slides/slide1.xml'``. For the package pseudo-partname '/',
filename is ''.
"""
return posixpath.split(self)[1]
@property
def idx(self):
"""
Return partname index as integer for tuple partname or None for
singleton partname, e.g. ``21`` for ``'/ppt/slides/slide21.xml'`` and
|None| for ``'/ppt/presentation.xml'``.
"""
filename = self.filename
if not filename:
return None
name_part = posixpath.splitext(filename)[0] # filename w/ext removed
match = self._filename_re.match(name_part)
if match is None:
return None
if match.group(2):
return int(match.group(2))
return None
@property
def membername(self):
"""
The pack URI with the leading slash stripped off, the form used as
the Zip file membername for the package item. Returns '' for the
package pseudo-partname '/'.
"""
return self[1:]
def relative_ref(self, baseURI):
"""
Return string containing relative reference to package item from
*baseURI*. E.g. PackURI('/ppt/slideLayouts/slideLayout1.xml') would
return '../slideLayouts/slideLayout1.xml' for baseURI '/ppt/slides'.
"""
# workaround for posixpath bug in 2.6, doesn't generate correct
# relative path when *start* (second) parameter is root ('/')
if baseURI == '/':
relpath = self[1:]
else:
relpath = posixpath.relpath(self, baseURI)
return relpath
@property
def rels_uri(self):
"""
The pack URI of the .rels part corresponding to the current pack URI.
Only produces sensible output if the pack URI is a partname or the
package pseudo-partname '/'.
"""
rels_filename = '%s.rels' % self.filename
rels_uri_str = posixpath.join(self.baseURI, '_rels', rels_filename)
return PackURI(rels_uri_str)
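# Illustrative round trip (a sketch based on the docstrings above; the slide part
# names are only examples):
#   base = PackURI('/ppt/slides/slide1.xml').baseURI                   # '/ppt/slides'
#   PackURI.from_rel_ref(base, '../slideLayouts/slideLayout1.xml')     # -> '/ppt/slideLayouts/slideLayout1.xml'
#   PackURI('/ppt/slideLayouts/slideLayout1.xml').relative_ref(base)   # -> '../slideLayouts/slideLayout1.xml'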
PACKAGE_URI = PackURI('/')
CONTENT_TYPES_URI = PackURI('/[Content_Types].xml')
| gpl-3.0 |
dsajkl/123 | cms/djangoapps/contentstore/management/commands/prompt.py | 70 | 1095 | import sys
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
    The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
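# Illustrative call (a sketch; the question text is made up):
#   if query_yes_no("Delete all existing course content?", default="no"):
#       pass  # proceed only on an explicit "yes"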
| agpl-3.0 |
opencloudinfra/orchestrator | venv/Lib/site-packages/requests/packages/urllib3/response.py | 197 | 18103 | from __future__ import absolute_import
from contextlib import contextmanager
import zlib
import io
from socket import timeout as SocketTimeout
from socket import error as SocketError
from ._collections import HTTPHeaderDict
from .exceptions import (
ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked
)
from .packages.six import string_types as basestring, binary_type, PY3
from .packages.six.moves import http_client as httplib
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed, is_response_to_head
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
class GzipDecoder(object):
def __init__(self):
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
return self._obj.decompress(data)
def _get_decoder(mode):
if mode == 'gzip':
return GzipDecoder()
return DeflateDecoder()
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
        If True, will attempt to decode the body based on the
        'content-encoding' header (e.g. 'gzip' and 'deflate'). If False, the
        raw data is returned instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
if body and isinstance(body, (basestring, binary_type)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
# Are we using the chunked-style of transfer encoding?
self.chunked = False
self.chunk_left = None
tr_enc = self.headers.get('transfer-encoding', '').lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
# If requested, preload the body.
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:``HTTPResponse.read`` if bytes
are encoded on the wire (e.g, compressed).
"""
return self._fp_bytes_read
def _init_decoder(self):
"""
        Set-up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
def _decode(self, data, decode_content, flush_decoder):
"""
Decode the data passed in and potentially flush the decoder.
"""
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
content_encoding = self.headers.get('content-encoding', '').lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding, e)
if flush_decoder and decode_content:
data += self._flush_decoder()
return data
def _flush_decoder(self):
"""
Flushes the decoder. Should only be called if the decoder is actually
being used.
"""
if self._decoder:
buf = self._decoder.decompress(b'')
return buf + self._decoder.flush()
return b''
@contextmanager
def _error_catcher(self):
"""
Catch low-level python exceptions, instead re-raising urllib3
variants, so that low-level exceptions are not leaked in the
high-level api.
On exit, release the connection back to the pool.
"""
try:
try:
yield
except SocketTimeout:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if 'read operation timed out' not in str(e): # Defensive:
# This shouldn't happen but just in case we're missing an edge
# case, let's avoid swallowing SSL errors.
raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except (HTTPException, SocketError) as e:
# This includes IncompleteRead.
raise ProtocolError('Connection broken: %r' % e, e)
except Exception:
# The response may not be closed but we're not going to use it anymore
# so close it now to ensure that the connection is released back to the pool.
if self._original_response and not self._original_response.isclosed():
self._original_response.close()
# Closing the response may not actually be sufficient to close
# everything, so if we have a hold of the connection close that
# too.
if self._connection is not None:
self._connection.close()
raise
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned despite of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
data = None
with self._error_catcher():
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
if data:
self._fp_bytes_read += len(data)
data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
return data
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
            How much of the content to read. The generator will return at most
            this much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
if self.chunked:
for line in self.read_chunked(amt, decode_content=decode_content):
yield line
else:
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
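    # Illustrative streaming read (a sketch; the PoolManager, URL and file name
    # are assumptions, not part of this class):
    #
    #   import urllib3
    #   http = urllib3.PoolManager()
    #   r = http.request('GET', 'http://example.com/', preload_content=False)
    #   with open('page.html', 'wb') as fh:
    #       for chunk in r.stream(2**16, decode_content=True):
    #           fh.write(chunk)
    #   r.release_conn()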
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = r.msg
if not isinstance(headers, HTTPHeaderDict):
if PY3: # Python 3
headers = HTTPHeaderDict(headers.items())
else: # Python 2
headers = HTTPHeaderDict.from_httplib(headers)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
resp = ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
return resp
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
# This method is required for `io` module compatibility.
return True
def readinto(self, b):
# This method is required for `io` module compatibility.
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[:len(temp)] = temp
return len(temp)
def _update_chunk_length(self):
# First, we'll figure out length of a chunk and then
# we'll try to read it from socket.
if self.chunk_left is not None:
return
line = self._fp.fp.readline()
line = line.split(b';', 1)[0]
try:
self.chunk_left = int(line, 16)
except ValueError:
# Invalid chunked protocol response, abort.
self.close()
raise httplib.IncompleteRead(line)
def _handle_chunk(self, amt):
returned_chunk = None
if amt is None:
chunk = self._fp._safe_read(self.chunk_left)
returned_chunk = chunk
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
elif amt < self.chunk_left:
value = self._fp._safe_read(amt)
self.chunk_left = self.chunk_left - amt
returned_chunk = value
elif amt == self.chunk_left:
value = self._fp._safe_read(amt)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
returned_chunk = value
else: # amt > self.chunk_left
returned_chunk = self._fp._safe_read(self.chunk_left)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
return returned_chunk
def read_chunked(self, amt=None, decode_content=None):
"""
Similar to :meth:`HTTPResponse.read`, but with an additional
parameter: ``decode_content``.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
self._init_decoder()
# FIXME: Rewrite this method and make it a class with a better structured logic.
if not self.chunked:
raise ResponseNotChunked(
"Response is not chunked. "
"Header 'transfer-encoding: chunked' is missing.")
# Don't bother reading the body of a HEAD request.
if self._original_response and is_response_to_head(self._original_response):
self._original_response.close()
return
with self._error_catcher():
while True:
self._update_chunk_length()
if self.chunk_left == 0:
break
chunk = self._handle_chunk(amt)
decoded = self._decode(chunk, decode_content=decode_content,
flush_decoder=False)
if decoded:
yield decoded
if decode_content:
# On CPython and PyPy, we should never need to flush the
# decoder. However, on Jython we *might* need to, so
# lets defensively do it anyway.
decoded = self._flush_decoder()
if decoded: # Platform-specific: Jython.
yield decoded
# Chunk content ends with \r\n: discard it.
while True:
line = self._fp.fp.readline()
if not line:
# Some sites may not end with '\r\n'.
break
if line == b'\r\n':
break
# We read everything; close the "file".
if self._original_response:
self._original_response.close()
| gpl-3.0 |
sanghinitin/golismero | thirdparty_libs/django/template/defaulttags.py | 39 | 47883 | """Default tags used by the template system, available to all templates."""
from __future__ import unicode_literals
import sys
import re
from datetime import datetime
from itertools import groupby, cycle as itertools_cycle
from django.conf import settings
from django.template.base import (Node, NodeList, Template, Context, Library,
TemplateSyntaxError, VariableDoesNotExist, InvalidTemplateLibrary,
BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END,
SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END,
VARIABLE_ATTRIBUTE_SEPARATOR, get_library, token_kwargs, kwarg_re)
from django.template.smartif import IfParser, Literal
from django.template.defaultfilters import date
from django.utils.encoding import smart_text
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.utils import six
from django.utils import timezone
register = Library()
class AutoEscapeControlNode(Node):
"""Implements the actions of the autoescape tag."""
def __init__(self, setting, nodelist):
self.setting, self.nodelist = setting, nodelist
def render(self, context):
old_setting = context.autoescape
context.autoescape = self.setting
output = self.nodelist.render(context)
context.autoescape = old_setting
if self.setting:
return mark_safe(output)
else:
return output
class CommentNode(Node):
def render(self, context):
return ''
class CsrfTokenNode(Node):
def render(self, context):
csrf_token = context.get('csrf_token', None)
if csrf_token:
if csrf_token == 'NOTPROVIDED':
return format_html("")
else:
return format_html("<input type='hidden' name='csrfmiddlewaretoken' value='{0}' />", csrf_token)
else:
# It's very probable that the token is missing because of
# misconfiguration, so we raise a warning
from django.conf import settings
if settings.DEBUG:
import warnings
warnings.warn("A {% csrf_token %} was used in a template, but the context did not provide the value. This is usually caused by not using RequestContext.")
return ''
class CycleNode(Node):
def __init__(self, cyclevars, variable_name=None, silent=False):
self.cyclevars = cyclevars
self.variable_name = variable_name
self.silent = silent
def render(self, context):
if self not in context.render_context:
# First time the node is rendered in template
context.render_context[self] = itertools_cycle(self.cyclevars)
cycle_iter = context.render_context[self]
value = next(cycle_iter).resolve(context)
if self.variable_name:
context[self.variable_name] = value
if self.silent:
return ''
return value
class DebugNode(Node):
def render(self, context):
from pprint import pformat
output = [pformat(val) for val in context]
output.append('\n\n')
output.append(pformat(sys.modules))
return ''.join(output)
class FilterNode(Node):
def __init__(self, filter_expr, nodelist):
self.filter_expr, self.nodelist = filter_expr, nodelist
def render(self, context):
output = self.nodelist.render(context)
# Apply filters.
context.update({'var': output})
filtered = self.filter_expr.resolve(context)
context.pop()
return filtered
class FirstOfNode(Node):
def __init__(self, vars):
self.vars = vars
def render(self, context):
for var in self.vars:
value = var.resolve(context, True)
if value:
return smart_text(value)
return ''
class ForNode(Node):
child_nodelists = ('nodelist_loop', 'nodelist_empty')
def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None):
self.loopvars, self.sequence = loopvars, sequence
self.is_reversed = is_reversed
self.nodelist_loop = nodelist_loop
if nodelist_empty is None:
self.nodelist_empty = NodeList()
else:
self.nodelist_empty = nodelist_empty
def __repr__(self):
reversed_text = self.is_reversed and ' reversed' or ''
return "<For Node: for %s in %s, tail_len: %d%s>" % \
(', '.join(self.loopvars), self.sequence, len(self.nodelist_loop),
reversed_text)
def __iter__(self):
for node in self.nodelist_loop:
yield node
for node in self.nodelist_empty:
yield node
def render(self, context):
if 'forloop' in context:
parentloop = context['forloop']
else:
parentloop = {}
context.push()
try:
values = self.sequence.resolve(context, True)
except VariableDoesNotExist:
values = []
if values is None:
values = []
if not hasattr(values, '__len__'):
values = list(values)
len_values = len(values)
if len_values < 1:
context.pop()
return self.nodelist_empty.render(context)
nodelist = NodeList()
if self.is_reversed:
values = reversed(values)
unpack = len(self.loopvars) > 1
# Create a forloop value in the context. We'll update counters on each
# iteration just below.
loop_dict = context['forloop'] = {'parentloop': parentloop}
for i, item in enumerate(values):
# Shortcuts for current loop iteration number.
loop_dict['counter0'] = i
loop_dict['counter'] = i+1
# Reverse counter iteration numbers.
loop_dict['revcounter'] = len_values - i
loop_dict['revcounter0'] = len_values - i - 1
# Boolean values designating first and last times through loop.
loop_dict['first'] = (i == 0)
loop_dict['last'] = (i == len_values - 1)
pop_context = False
if unpack:
# If there are multiple loop variables, unpack the item into
# them.
try:
unpacked_vars = dict(zip(self.loopvars, item))
except TypeError:
pass
else:
pop_context = True
context.update(unpacked_vars)
else:
context[self.loopvars[0]] = item
# In TEMPLATE_DEBUG mode provide source of the node which
# actually raised the exception
if settings.TEMPLATE_DEBUG:
for node in self.nodelist_loop:
try:
nodelist.append(node.render(context))
except Exception as e:
if not hasattr(e, 'django_template_source'):
e.django_template_source = node.source
raise
else:
for node in self.nodelist_loop:
nodelist.append(node.render(context))
if pop_context:
# The loop variables were pushed on to the context so pop them
# off again. This is necessary because the tag lets the length
# of loopvars differ to the length of each set of items and we
# don't want to leave any vars from the previous loop on the
# context.
context.pop()
context.pop()
return nodelist.render(context)
class IfChangedNode(Node):
child_nodelists = ('nodelist_true', 'nodelist_false')
def __init__(self, nodelist_true, nodelist_false, *varlist):
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self._last_seen = None
self._varlist = varlist
self._id = str(id(self))
def render(self, context):
if 'forloop' in context and self._id not in context['forloop']:
self._last_seen = None
context['forloop'][self._id] = 1
try:
if self._varlist:
# Consider multiple parameters. This automatically behaves
# like an OR evaluation of the multiple variables.
compare_to = [var.resolve(context, True) for var in self._varlist]
else:
compare_to = self.nodelist_true.render(context)
except VariableDoesNotExist:
compare_to = None
if compare_to != self._last_seen:
self._last_seen = compare_to
content = self.nodelist_true.render(context)
return content
elif self.nodelist_false:
return self.nodelist_false.render(context)
return ''
class IfEqualNode(Node):
child_nodelists = ('nodelist_true', 'nodelist_false')
def __init__(self, var1, var2, nodelist_true, nodelist_false, negate):
self.var1, self.var2 = var1, var2
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self.negate = negate
def __repr__(self):
return "<IfEqualNode>"
def render(self, context):
val1 = self.var1.resolve(context, True)
val2 = self.var2.resolve(context, True)
if (self.negate and val1 != val2) or (not self.negate and val1 == val2):
return self.nodelist_true.render(context)
return self.nodelist_false.render(context)
class IfNode(Node):
def __init__(self, conditions_nodelists):
self.conditions_nodelists = conditions_nodelists
def __repr__(self):
return "<IfNode>"
def __iter__(self):
for _, nodelist in self.conditions_nodelists:
for node in nodelist:
yield node
@property
def nodelist(self):
return NodeList(node for _, nodelist in self.conditions_nodelists for node in nodelist)
def render(self, context):
for condition, nodelist in self.conditions_nodelists:
if condition is not None: # if / elif clause
try:
match = condition.eval(context)
except VariableDoesNotExist:
match = None
else: # else clause
match = True
if match:
return nodelist.render(context)
return ''
class RegroupNode(Node):
def __init__(self, target, expression, var_name):
self.target, self.expression = target, expression
self.var_name = var_name
def resolve_expression(self, obj, context):
# This method is called for each object in self.target. See regroup()
# for the reason why we temporarily put the object in the context.
context[self.var_name] = obj
return self.expression.resolve(context, True)
def render(self, context):
obj_list = self.target.resolve(context, True)
if obj_list is None:
# target variable wasn't found in context; fail silently.
context[self.var_name] = []
return ''
# List of dictionaries in the format:
# {'grouper': 'key', 'list': [list of contents]}.
context[self.var_name] = [
{'grouper': key, 'list': list(val)}
for key, val in
groupby(obj_list, lambda obj: self.resolve_expression(obj, context))
]
return ''
def include_is_allowed(filepath):
for root in settings.ALLOWED_INCLUDE_ROOTS:
if filepath.startswith(root):
return True
return False
class SsiNode(Node):
def __init__(self, filepath, parsed):
self.filepath = filepath
self.parsed = parsed
def render(self, context):
filepath = self.filepath.resolve(context)
if not include_is_allowed(filepath):
if settings.DEBUG:
return "[Didn't have permission to include file]"
else:
return '' # Fail silently for invalid includes.
try:
with open(filepath, 'r') as fp:
output = fp.read()
except IOError:
output = ''
if self.parsed:
try:
t = Template(output, name=filepath)
return t.render(context)
except TemplateSyntaxError as e:
if settings.DEBUG:
return "[Included template had syntax error: %s]" % e
else:
return '' # Fail silently for invalid included templates.
return output
class LoadNode(Node):
def render(self, context):
return ''
class NowNode(Node):
def __init__(self, format_string):
self.format_string = format_string
def render(self, context):
tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None
return date(datetime.now(tz=tzinfo), self.format_string)
class SpacelessNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
from django.utils.html import strip_spaces_between_tags
return strip_spaces_between_tags(self.nodelist.render(context).strip())
class TemplateTagNode(Node):
mapping = {'openblock': BLOCK_TAG_START,
'closeblock': BLOCK_TAG_END,
'openvariable': VARIABLE_TAG_START,
'closevariable': VARIABLE_TAG_END,
'openbrace': SINGLE_BRACE_START,
'closebrace': SINGLE_BRACE_END,
'opencomment': COMMENT_TAG_START,
'closecomment': COMMENT_TAG_END,
}
def __init__(self, tagtype):
self.tagtype = tagtype
def render(self, context):
return self.mapping.get(self.tagtype, '')
class URLNode(Node):
def __init__(self, view_name, args, kwargs, asvar):
self.view_name = view_name
self.args = args
self.kwargs = kwargs
self.asvar = asvar
def render(self, context):
from django.core.urlresolvers import reverse, NoReverseMatch
args = [arg.resolve(context) for arg in self.args]
kwargs = dict([(smart_text(k, 'ascii'), v.resolve(context))
for k, v in self.kwargs.items()])
view_name = self.view_name.resolve(context)
if not view_name:
raise NoReverseMatch("'url' requires a non-empty first argument. "
"The syntax changed in Django 1.5, see the docs.")
# Try to look up the URL twice: once given the view name, and again
# relative to what we guess is the "main" app. If they both fail,
# re-raise the NoReverseMatch unless we're using the
# {% url ... as var %} construct in which case return nothing.
url = ''
try:
url = reverse(view_name, args=args, kwargs=kwargs, current_app=context.current_app)
except NoReverseMatch as e:
if settings.SETTINGS_MODULE:
project_name = settings.SETTINGS_MODULE.split('.')[0]
try:
url = reverse(project_name + '.' + view_name,
args=args, kwargs=kwargs,
current_app=context.current_app)
except NoReverseMatch:
if self.asvar is None:
# Re-raise the original exception, not the one with
# the path relative to the project. This makes a
# better error message.
raise e
else:
if self.asvar is None:
raise e
if self.asvar:
context[self.asvar] = url
return ''
else:
return url
class VerbatimNode(Node):
def __init__(self, content):
self.content = content
def render(self, context):
return self.content
class WidthRatioNode(Node):
def __init__(self, val_expr, max_expr, max_width):
self.val_expr = val_expr
self.max_expr = max_expr
self.max_width = max_width
def render(self, context):
try:
value = self.val_expr.resolve(context)
max_value = self.max_expr.resolve(context)
max_width = int(self.max_width.resolve(context))
except VariableDoesNotExist:
return ''
except (ValueError, TypeError):
raise TemplateSyntaxError("widthratio final argument must be an number")
try:
value = float(value)
max_value = float(max_value)
ratio = (value / max_value) * max_width
except ZeroDivisionError:
return '0'
except (ValueError, TypeError):
return ''
return str(int(round(ratio)))
class WithNode(Node):
def __init__(self, var, name, nodelist, extra_context=None):
self.nodelist = nodelist
# var and name are legacy attributes, being left in case they are used
# by third-party subclasses of this Node.
self.extra_context = extra_context or {}
if name:
self.extra_context[name] = var
def __repr__(self):
return "<WithNode>"
def render(self, context):
values = dict([(key, val.resolve(context)) for key, val in
six.iteritems(self.extra_context)])
context.update(values)
output = self.nodelist.render(context)
context.pop()
return output
@register.tag
def autoescape(parser, token):
"""
Force autoescape behavior for this block.
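Sample usage::
{% autoescape off %}
{{ body }}
{% endautoescape %}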
"""
args = token.contents.split()
if len(args) != 2:
raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.")
arg = args[1]
if arg not in ('on', 'off'):
raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'")
nodelist = parser.parse(('endautoescape',))
parser.delete_first_token()
return AutoEscapeControlNode((arg == 'on'), nodelist)
@register.tag
def comment(parser, token):
"""
Ignores everything between ``{% comment %}`` and ``{% endcomment %}``.
"""
parser.skip_past('endcomment')
return CommentNode()
@register.tag
def cycle(parser, token):
"""
Cycles among the given strings each time this tag is encountered.
Within a loop, cycles among the given strings each time through
the loop::
{% for o in some_list %}
<tr class="{% cycle 'row1' 'row2' %}">
...
</tr>
{% endfor %}
Outside of a loop, give the values a unique name the first time you call
it, then use that name each successive time through::
<tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
You can use any number of values, separated by spaces. Commas can also
be used to separate values; if a comma is used, the cycle values are
interpreted as literal strings.
The optional flag "silent" can be used to prevent the cycle declaration
from returning any value::
{% for o in some_list %}
{% cycle 'row1' 'row2' as rowcolors silent %}
<tr class="{{ rowcolors }}">{% include "subtemplate.html " %}</tr>
{% endfor %}
"""
# Note: This returns the exact same node on each {% cycle name %} call;
# that is, the node object returned from {% cycle a b c as name %} and the
# one returned from {% cycle name %} are the exact same object. This
# shouldn't cause problems (heh), but if it does, now you know.
#
# Ugly hack warning: This stuffs the named template dict into parser so
# that names are only unique within each template (as opposed to using
# a global variable, which would make cycle names have to be unique across
# *all* templates).
args = token.split_contents()
if len(args) < 2:
raise TemplateSyntaxError("'cycle' tag requires at least two arguments")
if ',' in args[1]:
# Backwards compatibility: {% cycle a,b %} or {% cycle a,b as foo %}
# case.
args[1:2] = ['"%s"' % arg for arg in args[1].split(",")]
if len(args) == 2:
# {% cycle foo %} case.
name = args[1]
if not hasattr(parser, '_namedCycleNodes'):
raise TemplateSyntaxError("No named cycles in template. '%s' is not defined" % name)
if name not in parser._namedCycleNodes:
raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
return parser._namedCycleNodes[name]
as_form = False
if len(args) > 4:
# {% cycle ... as foo [silent] %} case.
if args[-3] == "as":
if args[-1] != "silent":
raise TemplateSyntaxError("Only 'silent' flag is allowed after cycle's name, not '%s'." % args[-1])
as_form = True
silent = True
args = args[:-1]
elif args[-2] == "as":
as_form = True
silent = False
if as_form:
name = args[-1]
values = [parser.compile_filter(arg) for arg in args[1:-2]]
node = CycleNode(values, name, silent=silent)
if not hasattr(parser, '_namedCycleNodes'):
parser._namedCycleNodes = {}
parser._namedCycleNodes[name] = node
else:
values = [parser.compile_filter(arg) for arg in args[1:]]
node = CycleNode(values)
return node
@register.tag
def csrf_token(parser, token):
return CsrfTokenNode()
@register.tag
def debug(parser, token):
"""
Outputs a whole load of debugging information, including the current
context and imported modules.
Sample usage::
<pre>
{% debug %}
</pre>
"""
return DebugNode()
@register.tag('filter')
def do_filter(parser, token):
"""
Filters the contents of the block through variable filters.
Filters can also be piped through each other, and they can have
arguments -- just like in variable syntax.
Sample usage::
{% filter force_escape|lower %}
This text will be HTML-escaped, and will appear in lowercase.
{% endfilter %}
Note that the ``escape`` and ``safe`` filters are not acceptable arguments.
Instead, use the ``autoescape`` tag to manage autoescaping for blocks of
template code.
"""
_, rest = token.contents.split(None, 1)
filter_expr = parser.compile_filter("var|%s" % (rest))
for func, unused in filter_expr.filters:
if getattr(func, '_decorated_function', func).__name__ in ('escape', 'safe'):
raise TemplateSyntaxError('"filter %s" is not permitted. Use the "autoescape" tag instead.' % func.__name__)
nodelist = parser.parse(('endfilter',))
parser.delete_first_token()
return FilterNode(filter_expr, nodelist)
@register.tag
def firstof(parser, token):
"""
Outputs the first variable passed that is not False, without escaping.
Outputs nothing if all the passed variables are False.
Sample usage::
{% firstof var1 var2 var3 %}
This is equivalent to::
{% if var1 %}
{{ var1|safe }}
{% else %}{% if var2 %}
{{ var2|safe }}
{% else %}{% if var3 %}
{{ var3|safe }}
{% endif %}{% endif %}{% endif %}
but obviously much cleaner!
You can also use a literal string as a fallback value in case all
passed variables are False::
{% firstof var1 var2 var3 "fallback value" %}
If you want to escape the output, use a filter tag::
{% filter force_escape %}
{% firstof var1 var2 var3 "fallback value" %}
{% endfilter %}
"""
bits = token.split_contents()[1:]
if len(bits) < 1:
raise TemplateSyntaxError("'firstof' statement requires at least one argument")
return FirstOfNode([parser.compile_filter(bit) for bit in bits])
@register.tag('for')
def do_for(parser, token):
"""
Loops over each item in an array.
For example, to display a list of athletes given ``athlete_list``::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
</ul>
You can loop over a list in reverse by using
``{% for obj in list reversed %}``.
You can also unpack multiple values from a two-dimensional array::
{% for key,value in dict.items %}
{{ key }}: {{ value }}
{% endfor %}
The ``for`` tag can take an optional ``{% empty %}`` clause that will
be displayed if the given array is empty or could not be found::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% empty %}
<li>Sorry, no athletes in this list.</li>
{% endfor %}
<ul>
The above is equivalent to -- but shorter, cleaner, and possibly faster
than -- the following::
<ul>
{% if athlete_list %}
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
{% else %}
<li>Sorry, no athletes in this list.</li>
{% endif %}
</ul>
The for loop sets a number of variables available within the loop:
========================== ================================================
Variable Description
========================== ================================================
``forloop.counter`` The current iteration of the loop (1-indexed)
``forloop.counter0`` The current iteration of the loop (0-indexed)
``forloop.revcounter`` The number of iterations from the end of the
loop (1-indexed)
``forloop.revcounter0`` The number of iterations from the end of the
loop (0-indexed)
``forloop.first`` True if this is the first time through the loop
``forloop.last`` True if this is the last time through the loop
``forloop.parentloop`` For nested loops, this is the loop "above" the
current one
========================== ================================================
"""
bits = token.contents.split()
if len(bits) < 4:
raise TemplateSyntaxError("'for' statements should have at least four"
" words: %s" % token.contents)
is_reversed = bits[-1] == 'reversed'
in_index = is_reversed and -3 or -2
if bits[in_index] != 'in':
raise TemplateSyntaxError("'for' statements should use the format"
" 'for x in y': %s" % token.contents)
loopvars = re.split(r' *, *', ' '.join(bits[1:in_index]))
for var in loopvars:
if not var or ' ' in var:
raise TemplateSyntaxError("'for' tag received an invalid argument:"
" %s" % token.contents)
sequence = parser.compile_filter(bits[in_index+1])
nodelist_loop = parser.parse(('empty', 'endfor',))
token = parser.next_token()
if token.contents == 'empty':
nodelist_empty = parser.parse(('endfor',))
parser.delete_first_token()
else:
nodelist_empty = None
return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)
def do_ifequal(parser, token, negate):
bits = list(token.split_contents())
if len(bits) != 3:
raise TemplateSyntaxError("%r takes two arguments" % bits[0])
end_tag = 'end' + bits[0]
nodelist_true = parser.parse(('else', end_tag))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse((end_tag,))
parser.delete_first_token()
else:
nodelist_false = NodeList()
val1 = parser.compile_filter(bits[1])
val2 = parser.compile_filter(bits[2])
return IfEqualNode(val1, val2, nodelist_true, nodelist_false, negate)
@register.tag
def ifequal(parser, token):
"""
Outputs the contents of the block if the two arguments equal each other.
Examples::
{% ifequal user.id comment.user_id %}
...
{% endifequal %}
{% ifnotequal user.id comment.user_id %}
...
{% else %}
...
{% endifnotequal %}
"""
return do_ifequal(parser, token, False)
@register.tag
def ifnotequal(parser, token):
"""
Outputs the contents of the block if the two arguments are not equal.
See ifequal.
"""
return do_ifequal(parser, token, True)
class TemplateLiteral(Literal):
def __init__(self, value, text):
self.value = value
self.text = text # for better error messages
def display(self):
return self.text
def eval(self, context):
return self.value.resolve(context, ignore_failures=True)
class TemplateIfParser(IfParser):
error_class = TemplateSyntaxError
def __init__(self, parser, *args, **kwargs):
self.template_parser = parser
super(TemplateIfParser, self).__init__(*args, **kwargs)
def create_var(self, value):
return TemplateLiteral(self.template_parser.compile_filter(value), value)
@register.tag('if')
def do_if(parser, token):
"""
The ``{% if %}`` tag evaluates a variable, and if that variable is "true"
(i.e., exists, is not empty, and is not a false boolean value), the
contents of the block are output:
::
{% if athlete_list %}
Number of athletes: {{ athlete_list|count }}
{% elif athlete_in_locker_room_list %}
Athletes should be out of the locker room soon!
{% else %}
No athletes.
{% endif %}
In the above, if ``athlete_list`` is not empty, the number of athletes will
be displayed by the ``{{ athlete_list|count }}`` variable.
As you can see, the ``if`` tag may take one or several ``{% elif %}``
clauses, as well as an ``{% else %}`` clause that will be displayed if all
previous conditions fail. These clauses are optional.
``if`` tags may use ``or``, ``and`` or ``not`` to test a number of
variables or to negate a given variable::
{% if not athlete_list %}
There are no athletes.
{% endif %}
{% if athlete_list or coach_list %}
There are some athletes or some coaches.
{% endif %}
{% if athlete_list and coach_list %}
Both athletes and coaches are available.
{% endif %}
{% if not athlete_list or coach_list %}
There are no athletes, or there are some coaches.
{% endif %}
{% if athlete_list and not coach_list %}
There are some athletes and absolutely no coaches.
{% endif %}
Comparison operators are also available, and the use of filters is also
allowed, for example::
{% if articles|length >= 5 %}...{% endif %}
Arguments and operators _must_ have a space between them, so
``{% if 1>2 %}`` is not a valid if tag.
All supported operators are: ``or``, ``and``, ``in``, ``not in``
``==`` (or ``=``), ``!=``, ``>``, ``>=``, ``<`` and ``<=``.
Operator precedence follows Python.
"""
# {% if ... %}
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse(('elif', 'else', 'endif'))
conditions_nodelists = [(condition, nodelist)]
token = parser.next_token()
# {% elif ... %} (repeatable)
while token.contents.startswith('elif'):
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse(('elif', 'else', 'endif'))
conditions_nodelists.append((condition, nodelist))
token = parser.next_token()
# {% else %} (optional)
if token.contents == 'else':
nodelist = parser.parse(('endif',))
conditions_nodelists.append((None, nodelist))
token = parser.next_token()
# {% endif %}
assert token.contents == 'endif'
return IfNode(conditions_nodelists)
@register.tag
def ifchanged(parser, token):
"""
Checks if a value has changed from the last iteration of a loop.
The ``{% ifchanged %}`` block tag is used within a loop. It has two
possible uses.
1. Checks its own rendered contents against its previous state and only
displays the content if it has changed. For example, this displays a
list of days, only displaying the month if it changes::
<h1>Archive for {{ year }}</h1>
{% for date in days %}
{% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %}
<a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a>
{% endfor %}
2. If given one or more variables, check whether any variable has changed.
For example, the following shows the date every time it changes, while
showing the hour if either the hour or the date has changed::
{% for date in days %}
{% ifchanged date.date %} {{ date.date }} {% endifchanged %}
{% ifchanged date.hour date.date %}
{{ date.hour }}
{% endifchanged %}
{% endfor %}
"""
bits = token.contents.split()
nodelist_true = parser.parse(('else', 'endifchanged'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endifchanged',))
parser.delete_first_token()
else:
nodelist_false = NodeList()
values = [parser.compile_filter(bit) for bit in bits[1:]]
return IfChangedNode(nodelist_true, nodelist_false, *values)
@register.tag
def ssi(parser, token):
"""
Outputs the contents of a given file into the page.
Like a simple "include" tag, the ``ssi`` tag includes the contents
of another file -- which must be specified using an absolute path --
in the current page::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" %}
If the optional "parsed" parameter is given, the contents of the included
file are evaluated as template code, with the current context::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" parsed %}
"""
bits = token.split_contents()
parsed = False
if len(bits) not in (2, 3):
raise TemplateSyntaxError("'ssi' tag takes one argument: the path to"
" the file to be included")
if len(bits) == 3:
if bits[2] == 'parsed':
parsed = True
else:
raise TemplateSyntaxError("Second (optional) argument to %s tag"
" must be 'parsed'" % bits[0])
filepath = parser.compile_filter(bits[1])
return SsiNode(filepath, parsed)
@register.tag
def load(parser, token):
"""
Loads a custom template tag set.
For example, to load the template tags in
``django/templatetags/news/photos.py``::
{% load news.photos %}
Can also be used to load an individual tag/filter from
a library::
{% load byline from news %}
"""
bits = token.contents.split()
if len(bits) >= 4 and bits[-2] == "from":
try:
taglib = bits[-1]
lib = get_library(taglib)
except InvalidTemplateLibrary as e:
raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
(taglib, e))
else:
temp_lib = Library()
for name in bits[1:-2]:
if name in lib.tags:
temp_lib.tags[name] = lib.tags[name]
# a name could be a tag *and* a filter, so check for both
if name in lib.filters:
temp_lib.filters[name] = lib.filters[name]
elif name in lib.filters:
temp_lib.filters[name] = lib.filters[name]
else:
raise TemplateSyntaxError("'%s' is not a valid tag or filter in tag library '%s'" %
(name, taglib))
parser.add_library(temp_lib)
else:
for taglib in bits[1:]:
# add the library to the parser
try:
lib = get_library(taglib)
parser.add_library(lib)
except InvalidTemplateLibrary as e:
raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
(taglib, e))
return LoadNode()
@register.tag
def now(parser, token):
"""
Displays the date, formatted according to the given string.
Uses the same format as PHP's ``date()`` function; see http://php.net/date
for all the possible values.
Sample usage::
It is {% now "jS F Y H:i" %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'now' statement takes one argument")
format_string = bits[1][1:-1]
return NowNode(format_string)
@register.tag
def regroup(parser, token):
"""
Regroups a list of alike objects by a common attribute.
This complex tag is best illustrated by use of an example: say that
``people`` is a list of ``Person`` objects that have ``first_name``,
``last_name``, and ``gender`` attributes, and you'd like to display a list
that looks like:
* Male:
* George Bush
* Bill Clinton
* Female:
* Margaret Thatcher
* Condoleezza Rice
* Unknown:
* Pat Smith
The following snippet of template code would accomplish this dubious task::
{% regroup people by gender as grouped %}
<ul>
{% for group in grouped %}
<li>{{ group.grouper }}
<ul>
{% for item in group.list %}
<li>{{ item }}</li>
{% endfor %}
</ul>
{% endfor %}
</ul>
As you can see, ``{% regroup %}`` populates a variable with a list of
objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the
item that was grouped by; ``list`` contains the list of objects that share
that ``grouper``. In this case, ``grouper`` would be ``Male``, ``Female``
and ``Unknown``, and ``list`` is the list of people with those genders.
Note that ``{% regroup %}`` does not work when the list to be grouped is not
sorted by the key you are grouping by! This means that if your list of
people was not sorted by gender, you'd need to make sure it is sorted
before using it, i.e.::
{% regroup people|dictsort:"gender" by gender as grouped %}
"""
firstbits = token.contents.split(None, 3)
if len(firstbits) != 4:
raise TemplateSyntaxError("'regroup' tag takes five arguments")
target = parser.compile_filter(firstbits[1])
if firstbits[2] != 'by':
raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'")
lastbits_reversed = firstbits[3][::-1].split(None, 2)
if lastbits_reversed[1][::-1] != 'as':
raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must"
" be 'as'")
var_name = lastbits_reversed[0][::-1]
# RegroupNode will take each item in 'target', put it in the context under
# 'var_name', evaluate 'var_name'.'expression' in the current context, and
# group by the resulting value. After all items are processed, it will
# save the final result in the context under 'var_name', thus clearing the
# temporary values. This hack is necessary because the template engine
# doesn't provide a context-aware equivalent of Python's getattr.
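# For example, {% regroup people by gender as grouped %} compiles the filter
# expression 'grouped.gender' here.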
expression = parser.compile_filter(var_name +
VARIABLE_ATTRIBUTE_SEPARATOR +
lastbits_reversed[2][::-1])
return RegroupNode(target, expression, var_name)
@register.tag
def spaceless(parser, token):
"""
Removes whitespace between HTML tags, including tab and newline characters.
Example usage::
{% spaceless %}
<p>
<a href="foo/">Foo</a>
</p>
{% endspaceless %}
This example would return this HTML::
<p><a href="foo/">Foo</a></p>
Only space between *tags* is normalized -- not space between tags and text.
In this example, the space around ``Hello`` won't be stripped::
{% spaceless %}
<strong>
Hello
</strong>
{% endspaceless %}
"""
nodelist = parser.parse(('endspaceless',))
parser.delete_first_token()
return SpacelessNode(nodelist)
@register.tag
def templatetag(parser, token):
"""
Outputs one of the bits used to compose template tags.
Since the template system has no concept of "escaping", to display one of
the bits used in template tags, you must use the ``{% templatetag %}`` tag.
The argument tells which template bit to output:
================== =======
Argument Outputs
================== =======
``openblock`` ``{%``
``closeblock`` ``%}``
``openvariable`` ``{{``
``closevariable`` ``}}``
``openbrace`` ``{``
``closebrace`` ``}``
``opencomment`` ``{#``
``closecomment`` ``#}``
================== =======
"""
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'templatetag' statement takes one argument")
tag = bits[1]
if tag not in TemplateTagNode.mapping:
raise TemplateSyntaxError("Invalid templatetag argument: '%s'."
" Must be one of: %s" %
(tag, list(TemplateTagNode.mapping)))
return TemplateTagNode(tag)
@register.tag
def url(parser, token):
"""
Returns an absolute URL matching given view with its parameters.
This is a way to define links that aren't tied to a particular URL
configuration::
{% url "path.to.some_view" arg1 arg2 %}
or
{% url "path.to.some_view" name1=value1 name2=value2 %}
The first argument is a path to a view. It can be an absolute Python path
or just ``app_name.view_name`` without the project name if the view is
located inside the project.
Other arguments are space-separated values that will be filled in place of
positional and keyword arguments in the URL. Don't mix positional and
keyword arguments.
All arguments for the URL should be present.
For example if you have a view ``app_name.client`` taking client's id and
the corresponding line in a URLconf looks like this::
('^client/(\d+)/$', 'app_name.client')
and this app's URLconf is included into the project's URLconf under some
path::
('^clients/', include('project_name.app_name.urls'))
then in a template you can create a link for a certain client like this::
{% url "app_name.client" client.id %}
The URL will look like ``/clients/client/123/``.
The first argument can also be a named URL instead of the Python path to
the view callable. For example if the URLconf entry looks like this::
url('^client/(\d+)/$', name='client-detail-view')
then in the template you can use::
{% url "client-detail-view" client.id %}
There is even another possible value type for the first argument. It can be
the name of a template variable that will be evaluated to obtain the view
name or the URL name, e.g.::
{% with view_path="app_name.client" %}
{% url view_path client.id %}
{% endwith %}
or,
{% with url_name="client-detail-view" %}
{% url url_name client.id %}
{% endwith %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument"
" (path to a view)" % bits[0])
try:
viewname = parser.compile_filter(bits[1])
except TemplateSyntaxError as exc:
exc.args = (exc.args[0] + ". "
"The syntax of 'url' changed in Django 1.5, see the docs."),
raise
args = []
kwargs = {}
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to url tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return URLNode(viewname, args, kwargs, asvar)
@register.tag
def verbatim(parser, token):
"""
Stops the template engine from rendering the contents of this block tag.
Usage::
{% verbatim %}
{% don't process this %}
{% endverbatim %}
You can also designate a specific closing tag block (allowing the
unrendered use of ``{% endverbatim %}``)::
{% verbatim myblock %}
...
{% endverbatim myblock %}
"""
nodelist = parser.parse(('endverbatim',))
parser.delete_first_token()
return VerbatimNode(nodelist.render(Context()))
@register.tag
def widthratio(parser, token):
"""
For creating bar charts and such, this tag calculates the ratio of a given
value to a maximum value, and then applies that ratio to a constant.
For example::
<img src='bar.gif' height='10' width='{% widthratio this_value max_value max_width %}' />
If ``this_value`` is 175, ``max_value`` is 200, and ``max_width`` is 100,
the image in the above example will be 88 pixels wide
(because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88).
"""
bits = token.contents.split()
if len(bits) != 4:
raise TemplateSyntaxError("widthratio takes three arguments")
tag, this_value_expr, max_value_expr, max_width = bits
return WidthRatioNode(parser.compile_filter(this_value_expr),
parser.compile_filter(max_value_expr),
parser.compile_filter(max_width))
@register.tag('with')
def do_with(parser, token):
"""
Adds one or more values to the context (inside of this block) for caching
and easy access.
For example::
{% with total=person.some_sql_method %}
{{ total }} object{{ total|pluralize }}
{% endwith %}
Multiple values can be added to the context::
{% with foo=1 bar=2 %}
...
{% endwith %}
The legacy format of ``{% with person.some_sql_method as total %}`` is
still accepted.
"""
bits = token.split_contents()
remaining_bits = bits[1:]
extra_context = token_kwargs(remaining_bits, parser, support_legacy=True)
if not extra_context:
raise TemplateSyntaxError("%r expected at least one variable "
"assignment" % bits[0])
if remaining_bits:
raise TemplateSyntaxError("%r received an invalid token: %r" %
(bits[0], remaining_bits[0]))
nodelist = parser.parse(('endwith',))
parser.delete_first_token()
return WithNode(None, None, nodelist, extra_context=extra_context)
| gpl-2.0 |
noba3/KoTos | addons/plugin.video.phstreams/resources/lib/sources/dizibox_tv.py | 7 | 6433 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib.libraries import cache
from resources.lib import resolvers
class source:
def __init__(self):
self.base_link = 'http://www.dizibox.com'
def dizibox_shows(self):
try:
result = client.source(self.base_link)
result = client.parseDOM(result, 'input', {'id': 'filterAllCategories'})[0]
result = client.parseDOM(result, 'li')
result = zip(client.parseDOM(result, 'a', ret='href'), client.parseDOM(result, 'a'))
result = [(re.sub('http.+?//.+?/','/', i[0]), cleantitle.tv(i[1])) for i in result]
return result
except:
return
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
result = cache.get(self.dizibox_shows, 72)
tvshowtitle = cleantitle.tv(tvshowtitle)
result = [i[0] for i in result if tvshowtitle == i[1]][0]
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
if url == None: return
url = urlparse.urljoin(self.base_link, url)
season, episode = '%01d' % int(season), '%01d' % int(episode)
result = client.source(url)
if not season == '1':
url = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'season-.+?'})
url = [i for i in url if '/%s-sezon-' % season in i][0]
result = client.source(url)
result = client.parseDOM(result, 'a', ret='href')
result = [i for i in result if '%s-sezon-%s-bolum-' % (season, episode) in i][0]
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
sources_url = urlparse.urljoin(self.base_link, url)
result = client.source(sources_url, close=False)
result = re.sub(r'[^\x00-\x7F]+','', result)
result = re.compile('(<option.*?</option>)', re.DOTALL).findall(result)
result = [(client.parseDOM(i, 'option', ret='href'), client.parseDOM(i, 'option', ret='value'), client.parseDOM(i, 'option')) for i in result]
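# Note: non-ASCII characters were stripped from result above, so the Turkish label
# (presumably 'Altyazısız', i.e. "no subtitles") is matched in its ASCII-stripped form 'Altyazsz'.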
result = [i[0] + i[1] for i in result if len(i[2]) > 0 and i[2][0] == 'Altyazsz'][0][0]
url = urlparse.urljoin(self.base_link, result)
result = client.source(url, close=False)
url = client.parseDOM(result, 'span', attrs = {'class': 'object-wrapper'})[0]
url = client.parseDOM(url, 'iframe', ret='src')[0]
url = client.replaceHTMLCodes(url)
result = client.source(url, close=False)
try:
r = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)
if r == []: raise Exception()
r = [(i[0].replace('\\/', '/').replace('\\&', '&').decode('unicode_escape'), int(i[1])) for i in r]
u = [('%s|User-Agent=%s&Referer=%s' % (i[0], urllib.quote_plus(client.agent()), urllib.quote_plus(sources_url)), i[1], 'Dizibox') for i in r if not 'google' in i[0]]
u += [(i[0], i[1], 'GVideo') for i in r if 'google' in i[0]]
try: sources.append({'source': [i[2] for i in u if i[1] >= 1080][0], 'quality': '1080p', 'provider': 'Dizibox', 'url': [i[0] for i in u if i[1] >= 1080][0]})
except: pass
try: sources.append({'source': [i[2] for i in u if 720 <= i[1] < 1080][0], 'quality': 'HD', 'provider': 'Dizibox', 'url': [i[0] for i in u if 720 <= i[1] < 1080][0]})
except: pass
try: sources.append({'source': [i[2] for i in u if i[1] < 720][0], 'quality': 'SD', 'provider': 'Dizibox', 'url': [i[0] for i in u if i[1] < 720][0]})
except: pass
return sources
except:
pass
try:
if '.dizibox.' in url: url = re.compile('location\.href\s*=\s*"(.+?)"').findall(result)[0]
host = urlparse.urlparse(url).netloc
host = host.replace('mail.ru', 'mailru.ru').rsplit('.', 1)[0].split('.')[-1].lower()
strm = resolvers.request(url)
if strm == url or strm == None: raise Exception()
if type(strm) == list:
for i in strm: sources.append({'source': host, 'quality': i['quality'], 'provider': 'Dizibox', 'url': i['url']})
else:
sources.append({'source': host, 'quality': 'HD', 'provider': 'Dizibox', 'url': strm})
return sources
except:
pass
except:
return sources
def resolve(self, url):
try:
if 'google' not in url: return url
if url.startswith('stack://'): return url
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
return url
except:
return
| gpl-2.0 |
isaiah1782-cmis/isaiah1782-cmis-cs2 | cs2quiz2.py | 1 | 2232 | #PART 1: Terminology
#1) Give 3 examples of boolean expressions.
#a)
# >>>bool(1)
# >>>True
#b)
# >>>bool(0)
# >>>False
#c)
# >>>p = False
# >>>bool(p)
# >>>False
#
#2) What does 'return' do?
#
#The return statement hands a value back from a function to its caller, so the result can be stored in a variable and reused without running the function body again.
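#For example (a small sketch):
#    def double(x):
#        return 2 * x      # hands 2*x back to whoever called double()
#    y = double(5)         # y is now 10; using y later doesn't re-run the function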
#
#3) What are 2 ways indentation is important in python code?
#
#Indentation is important in python code because:
#a) It shows which lines belong to a block, such as the body of a function or an if statement.
#b) It separates statements that weren't meant to be grouped together from the block above them.
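#For example (a small sketch):
#    def greet():
#        print "inside the function"    # indented, so it belongs to greet()
#    print "outside the function"       # not indented, so it runs on its own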
#
#PART 2: Reading
#Type the values for 12 of the 16 of the variables below.
#
#problem1_a) -36
#problem1_b) -9
#problem1_c) the square root of 0, times negative 1 (0)
#problem1_d) -(abs(-5))
#
#problem2_a) True
#problem2_b) False
#problem2_c) False
#problem2_d) False
#
#problem3_a) 0.3
#problem3_b) 0.5
#problem3_c) 0.5
#problem3_d) 0.5
#
#problem4_a) 7
#problem4_b) 5
#problem4_c)
#problem4_d)
#
#PART 3: Programming
#Write a script that asks the user to type in 3 different numbers.
#If the user types 3 different numbers the script should then print out the
#largest of the 3 numbers.
#If they don't, it should print a message telling them they didn't follow
#the directions.
#Be sure to use the program structure you've learned (main function, processing function, output function)
import math
print "Type in 3 different numbers (decimals are OK!)"
a = float(raw_input("A: "))
b = float(raw_input("B: "))
c = float(raw_input("C: "))
def if_numbers_were_the_same():
if a == b or a == c:
exit("You didn't follow directions")
elif b == a or b == c:
exit("You didn't follow directions")
elif c == a or c == b:
exit("You didn't follow directions")
def main():
if a > b and a > c:
output = "The largest number was " + str(a)
elif b > a and b > c:
output = "The largest number was " + str(b)
elif c > a and c > b:
output = "The largest number was " + str(c)
return output
if_numbers_were_the_same()
x = main()
print x
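# Note (hypothetical alternative): since the three inputs are guaranteed distinct
# by this point, Python's built-in max() would give the same result in one line:
# print "The largest number was " + str(max(a, b, c))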
| cc0-1.0 |