repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
bw4sz/MotionMeerkat_Bisque | MotionMeerkat/BackgroundSubtractor.py | 1 | 7362 |
####Background subtraction for Motion class
import numpy as np
import cv2
import shapely.geometry as sg
from shapely.ops import cascaded_union
#Takes in the constructor arguments and the chosen background subtraction method
class Background:
def __init__(self,subMethod,display_image,acc,thresh,moghistory,mogvariance):
##Subtractor Method
self.subMethod=subMethod
####Create Background Constructor
if self.subMethod in ["Acc","Both"]:
self.running_average_image = np.float32(display_image)
self.accAvg=acc
self.threshT=thresh
if self.subMethod in ["MOG","Both"]:
#MOG method creator
self.fgbg = cv2.createBackgroundSubtractorMOG2(history=moghistory, detectShadows=False)
if self.subMethod=="KNN":
#KNN method creator
self.fgbg = cv2.createBackgroundSubtractorKNN()
#Frame Subtraction
def BackGroundSub(self,camera_imageROI):
## accumulated averaging
if self.subMethod in ["Acc","Both"]:
# Create an image with interactive feedback:
self.display_image = camera_imageROI.copy()
# Create a working "color image" to modify / blur
self.color_image = self.display_image.copy()
# Smooth to get rid of false positives
self.color_image = cv2.GaussianBlur(self.color_image,(3,3),0)
# Use the Running Average as the static background
cv2.accumulateWeighted(self.color_image,self.running_average_image,self.accAvg)
self.running_average_in_display_color_depth = cv2.convertScaleAbs(self.running_average_image)
# Subtract the current frame from the moving average.
self.difference=cv2.absdiff( self.color_image, self.running_average_in_display_color_depth)
#if vis: display("difference",5000,difference)
# Convert the image to greyscale.
self.grey_image=cv2.cvtColor( self.difference,cv2.COLOR_BGR2GRAY)
# Threshold the image to a black and white motion mask:
ret,self.grey_image = cv2.threshold(self.grey_image, self.threshT, 255, cv2.THRESH_BINARY )
##Mixture of Gaussians
if self.subMethod in ["MOG","KNN","Both"]:
self.grey_image = self.fgbg.apply(camera_imageROI)
#Morphological opening to remove small noise from the motion mask
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
self.grey_image= cv2.morphologyEx(self.grey_image, cv2.MORPH_OPEN, kernel)
return(self.grey_image)
def contourFilter(self,grey_image,minSize,ROI_include):
global display_image, camera_imageO, width, height
points = [] # Was using this to hold either pixel coords or polygon coords.
bounding_box_list = []
# Now calculate movements using the white pixels as "motion" data
_,contours,hierarchy = cv2.findContours(grey_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE )
if len(contours) == 0 :
#No movement, add to counter
self.nocountr=self.nocountr+1
#NoMotion flag
noMotion=True
return("Empty")
cnt=contours[0]
len(cnt)
drawing = np.uint8(display_image)
for cnt in contours:
bounding_rect = cv2.boundingRect( cnt )
point1 = ( bounding_rect[0], bounding_rect[1] )
point2 = ( bounding_rect[0] + bounding_rect[2], bounding_rect[1] + bounding_rect[3] )
bounding_box_list.append( ( point1, point2 ) )
# Find the average size of the bbox (targets), then
# remove any tiny bboxes (which are probably just noise).
# "Tiny" is defined as any box with 1/10th the area of the average box.
# This reduces false positives on tiny "sparkles" noise.
box_areas = []
for box in bounding_box_list:
box_width = box[1][0] - box[0][0]
box_height = box[1][1] - box[0][1]
box_areas.append( box_width * box_height )
average_box_area = 0.0
if len(box_areas): average_box_area = float( sum(box_areas) ) / len(box_areas)
trimmed_box_list = []
for box in bounding_box_list:
box_width = box[1][0] - box[0][0]
box_height = box[1][1] - box[0][1]
# Only keep the box if it's not a tiny noise box:
if (box_width * box_height) > average_box_area*.3:
trimmed_box_list.append( box )
#shapely does a much faster job of polygon union
#format into shapely bounding feature
shape_list=[]
## Centroids of each target
bound_center=[]
for out in trimmed_box_list:
sh_out=sg.box(out[0][0],out[0][1],out[1][0],out[1][1])
shape_list.append(sh_out)
#shape_pol=sg.MultiPolygon(shape_list)
casc=cascaded_union(shape_list).buffer(1)
if casc.type=="MultiPolygon":
#draw shapely bounds
for p in range(len(casc.geoms)):
b=casc.geoms[p].bounds
if casc.geoms[p].area > ((width * height) * (float(minSize)/100)):
if ROI_include == "exclude":
cv2.rectangle(camera_imageO,(int(b[0]),int(b[1])),(int(b[2]),int(b[3])),(0,0,255),thickness=2)
#cv2.putText(camera_imageO, str(round(casc.geoms[p].area/(width * height),3)*100), (int(b[0]),int(b[1])),cv2.FONT_HERSHEY_COMPLEX,1,(0,0,0),1,-1)
else:
cv2.rectangle(display_image,(int(b[0]),int(b[1])),(int(b[2]),int(b[3])),(0,0,255),thickness=2)
#Return the centroid to list, rounded two decimals
x=round(casc.geoms[p].centroid.coords.xy[0][0],2)
y=round(casc.geoms[p].centroid.coords.xy[1][0],2)
bound_center.append((x,y))
else:
b=casc.bounds
#If bounding polygon is larger than the minsize, draw a rectangle
if casc.area > ((width * height) * (float(minSize)/100)):
if ROI_include == "exclude":
cv2.rectangle(camera_imageO,(int(b[0]),int(b[1])),(int(b[2]),int(b[3])),(0,0,255),thickness=2)
#cv2.putText(camera_imageO, str(round(casc.area/(width * height),3)*100),(int(b[0]),int(b[1])),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,255),1,-1)
else:
cv2.rectangle(display_image,(int(b[0]),int(b[1])),(int(b[2]),int(b[3])),(0,0,255),thickness=2)
x=round(casc.centroid.coords.xy[0][0],2)
y=round(casc.centroid.coords.xy[1][0],2)
bound_center.append((x,y))
#return bounding boxes and centers
return(bound_center)
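# Hedged usage sketch: a minimal illustration of how the Background class above might be
# driven frame by frame. The video path and parameter values are assumptions for
# illustration only, not values taken from the project.
if __name__ == "__main__":
    cap = cv2.VideoCapture("example_video.avi")
    ok, first_frame = cap.read()
    if ok:
        # Accumulated-average subtraction; acc and thresh are arbitrary example values
        bg = Background("Acc", first_frame, acc=0.05, thresh=30, moghistory=500, mogvariance=16)
        while True:
            ok, frame = cap.read()
            if not ok:
                break
            motion_mask = bg.BackGroundSub(frame)
        cap.release()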
| gpl-3.0 |
electrolinux/weblate | weblate/trans/machine/glosbe.py | 11 | 1996 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from weblate.trans.machine.base import MachineTranslation
from weblate import appsettings
class GlosbeTranslation(MachineTranslation):
'''
Glosbe machine translation support.
'''
name = 'Glosbe'
def convert_language(self, language):
'''
Converts language to service specific code.
'''
return language.replace('_', '-').split('-')[0].lower()
def is_supported(self, language):
'''
Any language is supported.
'''
return True
def download_translations(self, language, text, unit, user):
'''
Downloads list of possible translations from a service.
'''
params = {
'from': appsettings.SOURCE_LANGUAGE,
'dest': language,
'format': 'json',
'phrase': text.strip(',.:?! ').encode('utf-8')
}
response = self.json_req(
'http://glosbe.com/gapi/translate',
**params
)
if 'tuc' not in response:
return []
return [(match['phrase']['text'], 100, self.name, text)
for match in response['tuc']
if 'phrase' in match and match['phrase'] is not None]
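# Hedged illustration: a hand-written sample of the response shape that
# download_translations expects, showing what its final list comprehension keeps.
# This is not a real Glosbe reply; it only mirrors the keys the code reads.
if __name__ == '__main__':
    sample_response = {'tuc': [
        {'phrase': {'text': 'Hallo'}},
        {'meanings': []},       # entries without a 'phrase' key are skipped
        {'phrase': None},       # as are entries whose 'phrase' is None
    ]}
    print([match['phrase']['text']
           for match in sample_response['tuc']
           if 'phrase' in match and match['phrase'] is not None])  # -> ['Hallo']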
| gpl-3.0 |
jonasjberg/autonameow | autonameow/vendor/guessit/rules/properties/language.py | 21 | 9861 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
language and subtitle_language properties
"""
# pylint: disable=no-member
import copy
import babelfish
from rebulk.remodule import re
from rebulk import Rebulk, Rule, RemoveMatch, RenameMatch
from ..common.words import iter_words, COMMON_WORDS
from ..common.validators import seps_surround
def language():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk()
rebulk.string(*subtitle_prefixes, name="subtitle_language.prefix", ignore_case=True, private=True,
validator=seps_surround)
rebulk.string(*subtitle_suffixes, name="subtitle_language.suffix", ignore_case=True, private=True,
validator=seps_surround)
rebulk.functional(find_languages, properties={'language': [None]})
rebulk.rules(SubtitlePrefixLanguageRule, SubtitleSuffixLanguageRule, SubtitleExtensionRule)
return rebulk
COMMON_WORDS_STRICT = frozenset(['brazil'])
UNDETERMINED = babelfish.Language('und')
SYN = {('und', None): ['unknown', 'inconnu', 'unk', 'un'],
('ell', None): ['gr', 'greek'],
('spa', None): ['esp', 'español'],
('fra', None): ['français', 'vf', 'vff', 'vfi', 'vfq'],
('swe', None): ['se'],
('por', 'BR'): ['po', 'pb', 'pob', 'br', 'brazilian'],
('cat', None): ['català'],
('ces', None): ['cz'],
('ukr', None): ['ua'],
('zho', None): ['cn'],
('jpn', None): ['jp'],
('hrv', None): ['scr'],
('mul', None): ['multi', 'dl']} # http://scenelingo.wordpress.com/2009/03/24/what-does-dl-mean/
class GuessitConverter(babelfish.LanguageReverseConverter): # pylint: disable=missing-docstring
_with_country_regexp = re.compile(r'(.*)\((.*)\)')
_with_country_regexp2 = re.compile(r'(.*)-(.*)')
def __init__(self):
self.guessit_exceptions = {}
for (alpha3, country), synlist in SYN.items():
for syn in synlist:
self.guessit_exceptions[syn.lower()] = (alpha3, country, None)
@property
def codes(self): # pylint: disable=missing-docstring
return (babelfish.language_converters['alpha3b'].codes |
babelfish.language_converters['alpha2'].codes |
babelfish.language_converters['name'].codes |
babelfish.language_converters['opensubtitles'].codes |
babelfish.country_converters['name'].codes |
frozenset(self.guessit_exceptions.keys()))
def convert(self, alpha3, country=None, script=None):
return str(babelfish.Language(alpha3, country, script))
def reverse(self, name):
with_country = (GuessitConverter._with_country_regexp.match(name) or
GuessitConverter._with_country_regexp2.match(name))
name = name.lower()
if with_country:
lang = babelfish.Language.fromguessit(with_country.group(1).strip())
lang.country = babelfish.Country.fromguessit(with_country.group(2).strip())
return lang.alpha3, lang.country.alpha2 if lang.country else None, lang.script or None
# exceptions come first, as they need to override a potential match
# with any of the other guessers
try:
return self.guessit_exceptions[name]
except KeyError:
pass
for conv in [babelfish.Language,
babelfish.Language.fromalpha3b,
babelfish.Language.fromalpha2,
babelfish.Language.fromname,
babelfish.Language.fromopensubtitles]:
try:
reverse = conv(name)
return reverse.alpha3, reverse.country, reverse.script
except (ValueError, babelfish.LanguageReverseError):
pass
raise babelfish.LanguageReverseError(name)
babelfish.language_converters['guessit'] = GuessitConverter()
subtitle_both = ['sub', 'subs', 'subbed', 'custom subbed', 'custom subs', 'custom sub', 'customsubbed', 'customsubs',
'customsub']
subtitle_prefixes = subtitle_both + ['st', 'vost', 'subforced', 'fansub', 'hardsub']
subtitle_suffixes = subtitle_both + ['subforced', 'fansub', 'hardsub']
lang_prefixes = ['true']
all_lang_prefixes_suffixes = subtitle_prefixes + subtitle_suffixes + lang_prefixes
def find_languages(string, context=None):
"""Find languages in the string
:return: list of tuple (property, Language, lang_word, word)
"""
allowed_languages = context.get('allowed_languages')
common_words = COMMON_WORDS_STRICT if allowed_languages else COMMON_WORDS
matches = []
for word_match in iter_words(string):
word = word_match.value
start, end = word_match.span
lang_word = word.lower()
key = 'language'
for prefix in subtitle_prefixes:
if lang_word.startswith(prefix):
lang_word = lang_word[len(prefix):]
key = 'subtitle_language'
for suffix in subtitle_suffixes:
if lang_word.endswith(suffix):
lang_word = lang_word[:len(lang_word) - len(suffix)]
key = 'subtitle_language'
for prefix in lang_prefixes:
if lang_word.startswith(prefix):
lang_word = lang_word[len(prefix):]
if lang_word not in common_words and word.lower() not in common_words:
try:
lang = babelfish.Language.fromguessit(lang_word)
match = (start, end, {'name': key, 'value': lang})
if allowed_languages:
if lang.name.lower() in allowed_languages \
or lang.alpha2.lower() in allowed_languages \
or lang.alpha3.lower() in allowed_languages:
matches.append(match)
# Keep language with alpha2 equivalent. Others are probably
# uncommon languages.
elif lang == 'mul' or hasattr(lang, 'alpha2'):
matches.append(match)
except babelfish.Error:
pass
return matches
class SubtitlePrefixLanguageRule(Rule):
"""
Convert language guess as subtitle_language if previous match is a subtitle language prefix
"""
consequence = RemoveMatch
properties = {'subtitle_language': [None]}
def when(self, matches, context):
to_rename = []
to_remove = matches.named('subtitle_language.prefix')
for lang in matches.named('language'):
prefix = matches.previous(lang, lambda match: match.name == 'subtitle_language.prefix', 0)
if not prefix:
group_marker = matches.markers.at_match(lang, lambda marker: marker.name == 'group', 0)
if group_marker:
# Find prefix if placed just before the group
prefix = matches.previous(group_marker, lambda match: match.name == 'subtitle_language.prefix',
0)
if not prefix:
# Find prefix if placed before in the group
prefix = matches.range(group_marker.start, lang.start,
lambda match: match.name == 'subtitle_language.prefix', 0)
if prefix:
to_rename.append((prefix, lang))
if prefix in to_remove:
to_remove.remove(prefix)
return to_rename, to_remove
def then(self, matches, when_response, context):
to_rename, to_remove = when_response
super(SubtitlePrefixLanguageRule, self).then(matches, to_remove, context)
for prefix, match in to_rename:
# Remove suffix equivalent of prefix.
suffix = copy.copy(prefix)
suffix.name = 'subtitle_language.suffix'
if suffix in matches:
matches.remove(suffix)
matches.remove(match)
match.name = 'subtitle_language'
matches.append(match)
class SubtitleSuffixLanguageRule(Rule):
"""
Convert language guess as subtitle_language if next match is a subtitle language suffix
"""
dependency = SubtitlePrefixLanguageRule
consequence = RemoveMatch
properties = {'subtitle_language': [None]}
def when(self, matches, context):
to_append = []
to_remove = matches.named('subtitle_language.suffix')
for lang in matches.named('language'):
suffix = matches.next(lang, lambda match: match.name == 'subtitle_language.suffix', 0)
if suffix:
to_append.append(lang)
if suffix in to_remove:
to_remove.remove(suffix)
return to_append, to_remove
def then(self, matches, when_response, context):
to_rename, to_remove = when_response
super(SubtitleSuffixLanguageRule, self).then(matches, to_remove, context)
for match in to_rename:
matches.remove(match)
match.name = 'subtitle_language'
matches.append(match)
class SubtitleExtensionRule(Rule):
"""
Convert language guess as subtitle_language if next match is a subtitle extension
"""
consequence = RenameMatch('subtitle_language')
properties = {'subtitle_language': [None]}
def when(self, matches, context):
subtitle_extension = matches.named('container',
lambda match: 'extension' in match.tags and 'subtitle' in match.tags,
0)
if subtitle_extension:
subtitle_lang = matches.previous(subtitle_extension, lambda match: match.name == 'language', 0)
if subtitle_lang:
return subtitle_lang
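# Hedged illustration: once this module has been imported (registering GuessitConverter
# under the 'guessit' name above), the SYN table is what lets scene-style tags resolve,
# for example:
#
#     babelfish.Language.fromguessit('pob')   # -> Language('por', country='BR')
#     babelfish.Language.fromguessit('vff')   # -> Language('fra')
#
# The codes come from the ('por', 'BR') and ('fra', None) entries of SYN; the exact repr
# of the returned Language objects may vary between babelfish versions.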
| gpl-2.0 |
joakim-hove/django | django/conf/locale/pl/formats.py | 504 | 1147 |
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j E Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
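# Hedged illustration: the *_INPUT_FORMATS entries above are plain strptime patterns.
# The sample date below is made up; it simply exercises the first DATE_INPUT_FORMATS entry.
if __name__ == '__main__':
    from datetime import datetime
    print(datetime.strptime('25.10.2006', DATE_INPUT_FORMATS[0]).date())  # 2006-10-25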
| bsd-3-clause |
ardekantur/pyglet | pyglet/gl/xlib.py | 38 | 13415 |
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
from ctypes import *
from pyglet.canvas.xlib import XlibCanvas
from base import Config, CanvasConfig, Context
from pyglet import gl
from pyglet.gl import glx
from pyglet.gl import glxext_arb
from pyglet.gl import glx_info
from pyglet.gl import glxext_mesa
class XlibConfig(Config):
def match(self, canvas):
if not isinstance(canvas, XlibCanvas):
raise RuntimeError('Canvas must be instance of XlibCanvas')
x_display = canvas.display._display
x_screen = canvas.display.x_screen
info = glx_info.GLXInfo(x_display)
have_13 = info.have_version(1, 3)
if have_13:
config_class = XlibCanvasConfig13
else:
if 'ATI' in info.get_client_vendor():
config_class = XlibCanvasConfig10ATI
else:
config_class = XlibCanvasConfig10
# Construct array of attributes
attrs = []
for name, value in self.get_gl_attributes():
attr = config_class.attribute_ids.get(name, None)
if attr and value is not None:
attrs.extend([attr, int(value)])
if have_13:
attrs.extend([glx.GLX_X_RENDERABLE, True])
else:
attrs.extend([glx.GLX_RGBA, True])
if len(attrs):
attrs.extend([0, 0])
attrib_list = (c_int * len(attrs))(*attrs)
else:
attrib_list = None
if have_13:
elements = c_int()
configs = glx.glXChooseFBConfig(x_display, x_screen,
attrib_list, byref(elements))
if not configs:
return []
configs = cast(configs,
POINTER(glx.GLXFBConfig * elements.value)).contents
result = [config_class(canvas, info, c, self) for c in configs]
# Can't free array until all XlibGLConfig13's are GC'd. Too much
# hassle, live with leak. XXX
#xlib.XFree(configs)
return result
else:
try:
return [config_class(canvas, info, attrib_list, self)]
except gl.ContextException:
return []
class BaseXlibCanvasConfig(CanvasConfig):
# Common code shared between GLX 1.0 and GLX 1.3 configs.
attribute_ids = {
'buffer_size': glx.GLX_BUFFER_SIZE,
'level': glx.GLX_LEVEL, # Not supported
'double_buffer': glx.GLX_DOUBLEBUFFER,
'stereo': glx.GLX_STEREO,
'aux_buffers': glx.GLX_AUX_BUFFERS,
'red_size': glx.GLX_RED_SIZE,
'green_size': glx.GLX_GREEN_SIZE,
'blue_size': glx.GLX_BLUE_SIZE,
'alpha_size': glx.GLX_ALPHA_SIZE,
'depth_size': glx.GLX_DEPTH_SIZE,
'stencil_size': glx.GLX_STENCIL_SIZE,
'accum_red_size': glx.GLX_ACCUM_RED_SIZE,
'accum_green_size': glx.GLX_ACCUM_GREEN_SIZE,
'accum_blue_size': glx.GLX_ACCUM_BLUE_SIZE,
'accum_alpha_size': glx.GLX_ACCUM_ALPHA_SIZE,
}
def __init__(self, canvas, glx_info, config):
super(BaseXlibCanvasConfig, self).__init__(canvas, config)
self.glx_info = glx_info
def compatible(self, canvas):
# TODO check more
return isinstance(canvas, XlibCanvas)
def _create_glx_context(self, share):
raise NotImplementedError('abstract')
def is_complete(self):
return True
def get_visual_info(self):
raise NotImplementedError('abstract')
class XlibCanvasConfig10(BaseXlibCanvasConfig):
def __init__(self, canvas, glx_info, attrib_list, config):
super(XlibCanvasConfig10, self).__init__(canvas, glx_info, config)
x_display = canvas.display._display
x_screen = canvas.display.x_screen
self._visual_info = glx.glXChooseVisual(
x_display, x_screen, attrib_list)
if not self._visual_info:
raise gl.ContextException('No conforming visual exists')
for name, attr in self.attribute_ids.items():
value = c_int()
result = glx.glXGetConfig(
x_display, self._visual_info, attr, byref(value))
if result >= 0:
setattr(self, name, value.value)
self.sample_buffers = 0
self.samples = 0
def get_visual_info(self):
return self._visual_info.contents
def create_context(self, share):
return XlibContext10(self, share)
class XlibCanvasConfig10ATI(XlibCanvasConfig10):
attribute_ids = BaseXlibCanvasConfig.attribute_ids.copy()
del attribute_ids['stereo']
stereo = False
class XlibCanvasConfig13(BaseXlibCanvasConfig):
attribute_ids = BaseXlibCanvasConfig.attribute_ids.copy()
attribute_ids.update({
'sample_buffers': glx.GLX_SAMPLE_BUFFERS,
'samples': glx.GLX_SAMPLES,
# Not supported in current pyglet API:
'render_type': glx.GLX_RENDER_TYPE,
'config_caveat': glx.GLX_CONFIG_CAVEAT,
'transparent_type': glx.GLX_TRANSPARENT_TYPE,
'transparent_index_value': glx.GLX_TRANSPARENT_INDEX_VALUE,
'transparent_red_value': glx.GLX_TRANSPARENT_RED_VALUE,
'transparent_green_value': glx.GLX_TRANSPARENT_GREEN_VALUE,
'transparent_blue_value': glx.GLX_TRANSPARENT_BLUE_VALUE,
'transparent_alpha_value': glx.GLX_TRANSPARENT_ALPHA_VALUE,
# Used internally
'x_renderable': glx.GLX_X_RENDERABLE,
})
def __init__(self, canvas, glx_info, fbconfig, config):
super(XlibCanvasConfig13, self).__init__(canvas, glx_info, config)
x_display = canvas.display._display
self._fbconfig = fbconfig
for name, attr in self.attribute_ids.items():
value = c_int()
result = glx.glXGetFBConfigAttrib(
x_display, self._fbconfig, attr, byref(value))
if result >= 0:
setattr(self, name, value.value)
def get_visual_info(self):
return glx.glXGetVisualFromFBConfig(
self.canvas.display._display, self._fbconfig).contents
def create_context(self, share):
if self.glx_info.have_extension('GLX_ARB_create_context'):
return XlibContextARB(self, share)
else:
return XlibContext13(self, share)
class BaseXlibContext(Context):
def __init__(self, config, share):
super(BaseXlibContext, self).__init__(config, share)
self.x_display = config.canvas.display._display
self.glx_context = self._create_glx_context(share)
glx_context_id = self.glx_context.contents._opaque_struct
if glx_context_id == glx.GLX_BAD_CONTEXT:
raise gl.ContextException('Invalid context share')
elif glx_context_id == glx.GLXBadFBConfig:
raise gl.ContextException('Invalid GL configuration')
elif glx_context_id < 0:
raise gl.ContextException('Could not create GL context')
self._have_SGI_video_sync = \
config.glx_info.have_extension('GLX_SGI_video_sync')
self._have_SGI_swap_control = \
config.glx_info.have_extension('GLX_SGI_swap_control')
self._have_MESA_swap_control = \
config.glx_info.have_extension('GLX_MESA_swap_control')
# In order of preference:
# 1. GLX_MESA_swap_control (more likely to work where video_sync will
# not)
# 2. GLX_SGI_video_sync (does not work on Intel 945GM, but that has
# MESA)
# 3. GLX_SGI_swap_control (cannot be disabled once enabled).
self._use_video_sync = (self._have_SGI_video_sync and
not self._have_MESA_swap_control)
# XXX mandate that vsync defaults on across all platforms.
self._vsync = True
def is_direct(self):
return glx.glXIsDirect(self.x_display, self.glx_context)
def set_vsync(self, vsync=True):
self._vsync = vsync
if not self._use_video_sync:
interval = vsync and 1 or 0
if self._have_MESA_swap_control:
glxext_mesa.glXSwapIntervalMESA(interval)
elif self._have_SGI_swap_control and interval:
# SGI_swap_control interval cannot be set to 0
glxext_arb.glXSwapIntervalSGI(interval)
def get_vsync(self):
return self._vsync
def _wait_vsync(self):
if self._vsync and self._have_SGI_video_sync and self._use_video_sync:
count = c_uint()
glxext_arb.glXGetVideoSyncSGI(byref(count))
glxext_arb.glXWaitVideoSyncSGI(
2, (count.value + 1) % 2, byref(count))
class XlibContext10(BaseXlibContext):
def __init__(self, config, share):
super(XlibContext10, self).__init__(config, share)
def _create_glx_context(self, share):
if self.config._requires_gl_3():
raise gl.ContextException(
'Require GLX_ARB_create_context extension to create ' +
'OpenGL 3 contexts.')
if share:
share_context = share.glx_context
else:
share_context = None
return glx.glXCreateContext(self.config.canvas.display._display,
self.config._visual_info, share_context, True)
def attach(self, canvas):
super(XlibContext10, self).attach(canvas)
self.set_current()
def set_current(self):
glx.glXMakeCurrent(self.x_display, self.canvas.x_window,
self.glx_context)
super(XlibContext10, self).set_current()
def detach(self):
if not self.canvas:
return
self.set_current()
gl.glFlush()
glx.glXMakeCurrent(self.x_display, 0, None)
super(XlibContext10, self).detach()
def destroy(self):
super(XlibContext10, self).destroy()
glx.glXDestroyContext(self.x_display, self.glx_context)
self.glx_context = None
def flip(self):
if not self.canvas:
return
if self._vsync:
self._wait_vsync()
glx.glXSwapBuffers(self.x_display, self.canvas.x_window)
class XlibContext13(BaseXlibContext):
def __init__(self, config, share):
super(XlibContext13, self).__init__(config, share)
self.glx_window = None
def _create_glx_context(self, share):
if self.config._requires_gl_3():
raise gl.ContextException(
'Require GLX_ARB_create_context extension to create ' +
'OpenGL 3 contexts.')
if share:
share_context = share.glx_context
else:
share_context = None
return glx.glXCreateNewContext(self.config.canvas.display._display,
self.config._fbconfig, glx.GLX_RGBA_TYPE, share_context, True)
def attach(self, canvas):
if canvas is self.canvas: # XXX do this for carbon too?
return
super(XlibContext13, self).attach(canvas)
self.glx_window = glx.glXCreateWindow(
self.x_display, self.config._fbconfig, canvas.x_window, None)
self.set_current()
def set_current(self):
glx.glXMakeContextCurrent(
self.x_display, self.glx_window, self.glx_window, self.glx_context)
super(XlibContext13, self).set_current()
def detach(self):
if not self.canvas:
return
self.set_current()
gl.glFlush() # needs to be in try/except?
super(XlibContext13, self).detach()
glx.glXMakeContextCurrent(self.x_display, 0, 0, None)
if self.glx_window:
glx.glXDestroyWindow(self.x_display, self.glx_window)
self.glx_window = None
def destroy(self):
super(XlibContext13, self).destroy()
if self.glx_window:
glx.glXDestroyWindow(self.config.display._display, self.glx_window)
self.glx_window = None
if self.glx_context:
glx.glXDestroyContext(self.x_display, self.glx_context)
self.glx_context = None
def flip(self):
if not self.glx_window:
return
if self._vsync:
self._wait_vsync()
glx.glXSwapBuffers(self.x_display, self.glx_window)
class XlibContextARB(XlibContext13):
def _create_glx_context(self, share):
if share:
share_context = share.glx_context
else:
share_context = None
attribs = []
if self.config.major_version is not None:
attribs.extend([glxext_arb.GLX_CONTEXT_MAJOR_VERSION_ARB,
self.config.major_version])
if self.config.minor_version is not None:
attribs.extend([glxext_arb.GLX_CONTEXT_MINOR_VERSION_ARB,
self.config.minor_version])
flags = 0
if self.config.forward_compatible:
flags |= glxext_arb.GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB
if self.config.debug:
flags |= glxext_arb.GLX_CONTEXT_DEBUG_BIT_ARB
if flags:
attribs.extend([glxext_arb.GLX_CONTEXT_FLAGS_ARB, flags])
attribs.append(0)
attribs = (c_int * len(attribs))(*attribs)
return glxext_arb.glXCreateContextAttribsARB(
self.config.canvas.display._display,
self.config._fbconfig, share_context, True, attribs)
| bsd-3-clause |
vipmike007/virt-test | virttest/utils_test/libguestfs.py | 9 | 31942 |
import re
import os
import logging
import commands
from autotest.client.shared import error, utils
from virttest import virsh, virt_vm, libvirt_vm, data_dir
from virttest import utils_net, xml_utils
from virttest.libvirt_xml import vm_xml, xcepts
from virttest import utils_libguestfs as lgf
from virttest import qemu_storage
class VTError(Exception):
pass
class VTAttachError(VTError):
def __init__(self, cmd, output):
super(VTAttachError, self).__init__(cmd, output)
self.cmd = cmd
self.output = output
def __str__(self):
return ("Attach command failed:%s\n%s" % (self.cmd, self.output))
class VTMountError(VTError):
def __init__(self, cmd, output):
VTError.__init__(self, cmd, output)
self.cmd = cmd
self.output = output
def __str__(self):
return ("Mount command failed:%s\n%s" % (self.cmd, self.output))
class VTXMLParseError(VTError):
def __init__(self, cmd, output):
super(VTXMLParseError, self).__init__(cmd, output)
self.cmd = cmd
self.output = output
def __str__(self):
return ("Parse XML with '%s' failed:%s" % (self.cmd, self.output))
def preprocess_image(params):
"""
Create a disk which is used by guestfish
params: Get params from cfg file
"""
image_dir = params.get("img_dir", data_dir.get_tmp_dir())
image_name = params.get("image_name", "gs_common")
image = qemu_storage.QemuImg(params, image_dir, image_name)
image_path, _ = image.create(params)
logging.info("Image created in %s" % image_path)
return image_path
def primary_disk_virtio(vm):
"""
To verify if system disk is virtio.
:param vm: Libvirt VM object.
"""
vmdisks = vm.get_disk_devices()
if "vda" in vmdisks.keys():
return True
return False
def get_primary_disk(vm):
"""
Get primary disk source.
:param vm: Libvirt VM object.
"""
vmdisks = vm.get_disk_devices()
if len(vmdisks):
pri_target = ['vda', 'sda', 'hda']
for target in pri_target:
try:
return vmdisks[target]['source']
except KeyError:
pass
return None
def attach_additional_disk(vm, disksize, targetdev):
"""
Create a disk with disksize, then attach it to given vm.
:param vm: Libvirt VM object.
:param disksize: size of attached disk
:param targetdev: target of disk device
"""
logging.info("Attaching disk...")
disk_path = os.path.join(data_dir.get_tmp_dir(), targetdev)
cmd = "qemu-img create %s %s" % (disk_path, disksize)
status, output = commands.getstatusoutput(cmd)
if status:
return (False, output)
# To confirm attached device do not exist.
virsh.detach_disk(vm.name, targetdev, extra="--config")
attach_result = virsh.attach_disk(vm.name, disk_path, targetdev,
extra="--config", debug=True)
if attach_result.exit_status:
return (False, attach_result)
return (True, disk_path)
def define_new_vm(vm_name, new_name):
"""
Just define a new vm from given name
"""
try:
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
vmxml.vm_name = new_name
del vmxml.uuid
logging.debug(str(vmxml))
vmxml.define()
return True
except xcepts.LibvirtXMLError, detail:
logging.error(detail)
return False
def cleanup_vm(vm_name=None, disk=None):
"""
Cleanup the vm with its disk deleted.
"""
try:
if vm_name is not None:
virsh.undefine(vm_name)
except error.CmdError, detail:
logging.error("Undefine %s failed:%s", vm_name, detail)
try:
if disk is not None:
if os.path.exists(disk):
os.remove(disk)
except IOError, detail:
logging.error("Remove disk %s failed:%s", disk, detail)
class VirtTools(object):
"""
Useful functions for virt-commands.
Some virt-tools need an input disk and output disk.
Mainly for virt-clone, virt-sparsify, virt-resize.
"""
def __init__(self, vm, params):
self.params = params
self.oldvm = vm
# Many command will create a new vm or disk, init it here
self.newvm = libvirt_vm.VM("VTNEWVM", vm.params, vm.root_dir,
vm.address_cache)
# Prepare for created vm disk
self.indisk = get_primary_disk(vm)
self.outdisk = None
def update_vm_disk(self):
"""
Update oldvm's disk, and then create a newvm.
"""
target_dev = self.params.get("gf_updated_target_dev", "vdb")
device_size = self.params.get("gf_updated_device_size", "50M")
self.newvm.name = self.params.get("gf_updated_new_vm")
if self.newvm.is_alive():
self.newvm.destroy()
self.newvm.wait_for_shutdown()
attachs, attacho = attach_additional_disk(self.newvm,
disksize=device_size,
targetdev=target_dev)
if attachs:
# Restart vm for guestfish command
# Otherwise updated disk is not visible
try:
self.newvm.start()
self.newvm.wait_for_login()
self.newvm.destroy()
self.newvm.wait_for_shutdown()
self.params['added_disk_path'] = attacho
except virt_vm.VMError, detail:
raise VTAttachError("", str(detail))
else:
raise VTAttachError("", attacho)
def clone_vm_filesystem(self, newname=None):
"""
Clone a new vm with only its filesystem disk.
:param newname: if newname is None, a '-virtclone' suffixed name is generated.
"""
logging.info("Cloning...")
# Init options for virt-clone
options = {}
autoclone = bool(self.params.get("autoclone", False))
new_filesystem_path = self.params.get("new_filesystem_path")
cloned_files = []
if new_filesystem_path:
self.outdisk = new_filesystem_path
elif self.indisk is not None:
self.outdisk = "%s-clone" % self.indisk
cloned_files.append(self.outdisk)
options['files'] = cloned_files
# cloned_mac can be CREATED, RANDOM or a string.
cloned_mac = self.params.get("cloned_mac", "CREATED")
if cloned_mac == "CREATED":
options['mac'] = utils_net.generate_mac_address_simple()
else:
options['mac'] = cloned_mac
options['ignore_status'] = True
options['debug'] = True
options['timeout'] = int(self.params.get("timeout", 240))
if newname is None:
newname = "%s-virtclone" % self.oldvm.name
result = lgf.virt_clone_cmd(self.oldvm.name, newname,
autoclone, **options)
if result.exit_status:
error_info = "Clone %s to %s failed." % (self.oldvm.name, newname)
logging.error(error_info)
return (False, result)
else:
self.newvm.name = newname
cloned_mac = vm_xml.VMXML.get_first_mac_by_name(newname)
if cloned_mac is not None:
self.newvm.address_cache[cloned_mac] = None
return (True, result)
def sparsify_disk(self):
"""
Sparsify a disk
"""
logging.info("Sparsifing...")
if self.indisk is None:
logging.error("No disk can be sparsified.")
return (False, "Input disk is None.")
if self.outdisk is None:
self.outdisk = "%s-sparsify" % self.indisk
timeout = int(self.params.get("timeout", 240))
result = lgf.virt_sparsify_cmd(self.indisk, self.outdisk,
ignore_status=True, debug=True,
timeout=timeout)
if result.exit_status:
error_info = "Sparsify %s to %s failed." % (self.indisk,
self.outdisk)
logging.error(error_info)
return (False, result)
return (True, result)
def define_vm_with_newdisk(self):
"""
Define the new vm with old vm's configuration
Changes:
1.replace name
2.delete uuid
3.replace disk
"""
logging.info("Define a new vm:")
old_vm_name = self.oldvm.name
new_vm_name = "%s-vtnewdisk" % old_vm_name
self.newvm.name = new_vm_name
old_disk = self.indisk
new_disk = self.outdisk
try:
vmxml = vm_xml.VMXML.new_from_dumpxml(old_vm_name)
vmxml.vm_name = new_vm_name
vmxml.uuid = ""
vmxml.set_xml(re.sub(old_disk, new_disk,
str(vmxml.__dict_get__('xml'))))
logging.debug(vmxml.__dict_get__('xml'))
vmxml.define()
except xcepts.LibvirtXMLError, detail:
logging.debug(detail)
return (False, detail)
return (True, vmxml.xml)
def expand_vm_filesystem(self, resize_part_num=2, resized_size="+1G",
new_disk=None):
"""
Expand vm's filesystem with virt-resize.
"""
logging.info("Resizing vm's disk...")
options = {}
options['resize'] = "/dev/sda%s" % resize_part_num
options['resized_size'] = resized_size
if new_disk is not None:
self.outdisk = new_disk
elif self.outdisk is None:
self.outdisk = "%s-resize" % self.indisk
options['ignore_status'] = True
options['debug'] = True
options['timeout'] = int(self.params.get("timeout", 480))
result = lgf.virt_resize_cmd(self.indisk, self.outdisk, **options)
if result.exit_status:
logging.error(result)
return (False, result)
return (True, self.outdisk)
def guestmount(self, mountpoint, disk_or_domain=None):
"""
Mount filesystems in a disk or domain to host mountpoint.
:param disk_or_domain: if it is None, use default vm in params
"""
logging.info("Mounting filesystems...")
if disk_or_domain is None:
disk_or_domain = self.oldvm.name
if not os.path.isdir(mountpoint):
os.mkdir(mountpoint)
if os.path.ismount(mountpoint):
utils.run("umount -l %s" % mountpoint, ignore_status=True)
inspector = "yes" == self.params.get("gm_inspector", "yes")
readonly = "yes" == self.params.get("gm_readonly", "no")
special_mountpoints = self.params.get("special_mountpoints", [])
is_disk = "yes" == self.params.get("gm_is_disk", "no")
options = {}
options['ignore_status'] = True
options['debug'] = True
options['timeout'] = int(self.params.get("timeout", 240))
options['special_mountpoints'] = special_mountpoints
options['is_disk'] = is_disk
result = lgf.guestmount(disk_or_domain, mountpoint,
inspector, readonly, **options)
if result.exit_status:
error_info = "Mount %s to %s failed." % (disk_or_domain,
mountpoint)
logging.error(result)
return (False, error_info)
return (True, mountpoint)
def write_file_with_guestmount(self, mountpoint, path,
content=None, vm_ref=None,
cleanup=True):
"""
Write content to file with guestmount
"""
logging.info("Creating file...")
gms, gmo = self.guestmount(mountpoint, vm_ref)
if gms is True:
mountpoint = gmo
else:
logging.error("Create file %s failed.", path)
return (False, gmo)
# file's path on host's mountpoint
# Connect mountpoint and path, then remove additional character '/'
file_path = os.path.abspath("%s/%s" % (mountpoint, path))
if content is None:
content = "This is a temp file with guestmount."
try:
fd = open(file_path, "w")
fd.write(content)
fd.close()
except IOError, detail:
logging.error(detail)
return (False, detail)
logging.info("Create file %s successfully", file_path)
# Cleanup created file
if cleanup:
utils.run("rm -f %s" % file_path, ignore_status=True)
return (True, file_path)
def get_primary_disk_fs_type(self):
"""
Get primary disk filesystem type
"""
result = lgf.virt_filesystems(self.oldvm.name, long_format=True)
if result.exit_status:
raise error.TestNAError("Cannot get primary disk"
" filesystem information!")
fs_info = result.stdout.strip().splitlines()
if len(fs_info) <= 1:
raise error.TestNAError("No disk filesystem information!")
try:
primary_disk_info = fs_info[1]
fs_type = primary_disk_info.split()[2]
return fs_type
except (KeyError, ValueError), detail:
raise error.TestFail(str(detail))
def tar_in(self, tar_file, dest="/tmp", vm_ref=None):
if vm_ref is None:
vm_ref = self.oldvm.name
result = lgf.virt_tar_in(vm_ref, tar_file, dest,
debug=True, ignore_status=True)
return result
def tar_out(self, directory, tar_file="temp.tar", vm_ref=None):
if vm_ref is None:
vm_ref = self.oldvm.name
result = lgf.virt_tar_out(vm_ref, directory, tar_file,
debug=True, ignore_status=True)
return result
def cat(self, filename, vm_ref=None):
if vm_ref is None:
vm_ref = self.oldvm.name
result = lgf.virt_cat_cmd(vm_ref, filename, debug=True,
ignore_status=True)
return result
def copy_in(self, filename, dest="/tmp", vm_ref=None):
if vm_ref is None:
vm_ref = self.oldvm.name
result = lgf.virt_copy_in(vm_ref, filename, dest, debug=True,
ignore_status=True)
return result
def copy_out(self, file_path, localdir="/tmp", vm_ref=None):
if vm_ref is None:
vm_ref = self.oldvm.name
result = lgf.virt_copy_out(vm_ref, file_path, localdir,
debug=True, ignore_status=True)
return result
def format_disk(self, disk_path=None, filesystem=None, partition=None,
lvm=None):
"""
:param disk_path: None for additional disk by update_vm_disk() only
"""
if disk_path is None:
disk_path = self.params.get("added_disk_path")
result = lgf.virt_format(disk_path, filesystem,
lvm=lvm, partition=partition,
debug=True, ignore_status=True)
return result
def get_filesystems_info(self, vm_ref=None):
if vm_ref is None:
vm_ref = self.oldvm.name
result = lgf.virt_filesystems(vm_ref, long_format=True,
debug=True, all=True,
ignore_status=True)
return result
def list_df(self, vm_ref=None):
if vm_ref is None:
vm_ref = self.oldvm.name
result = lgf.virt_df(vm_ref, debug=True, ignore_status=True)
return result
def get_vm_info_with_inspector(self, vm_ref=None):
"""
Return a dict includes os information.
"""
if vm_ref is None:
vm_ref = self.oldvm.name
# A dict to include system information
sys_info = {}
result = lgf.virt_inspector(vm_ref, ignore_status=True)
if result.exit_status:
logging.error("Get %s information with inspector(2) failed:\n%s",
vm_ref, result)
return sys_info
# Analyse output to get information
try:
xmltreefile = xml_utils.XMLTreeFile(result.stdout)
os_root = xmltreefile.find("operatingsystem")
if os_root is None:
raise VTXMLParseError("operatingsystem", os_root)
except (IOError, VTXMLParseError), detail:
logging.error(detail)
return sys_info
sys_info['root'] = os_root.findtext("root")
sys_info['name'] = os_root.findtext("name")
sys_info['arch'] = os_root.findtext("arch")
sys_info['distro'] = os_root.findtext("distro")
sys_info['release'] = os_root.findtext("product_name")
sys_info['major_version'] = os_root.findtext("major_version")
sys_info['minor_version'] = os_root.findtext("minor_version")
sys_info['hostname'] = os_root.findtext("hostname")
# filesystems and mountpoints are dict to restore detail info
mountpoints = {}
for node in os_root.find("mountpoints"):
mp_device = node.get("dev")
if mp_device is not None:
mountpoints[mp_device] = node.text
sys_info['mountpoints'] = mountpoints
filesystems = {}
for node in os_root.find("filesystems"):
fs_detail = {}
fs_device = node.get("dev")
if fs_device is not None:
fs_detail['type'] = node.findtext("type")
fs_detail['label'] = node.findtext("label")
fs_detail['uuid'] = node.findtext("uuid")
filesystems[fs_device] = fs_detail
sys_info['filesystems'] = filesystems
logging.debug("VM information:\n%s", sys_info)
return sys_info
class GuestfishTools(lgf.GuestfishPersistent):
"""Useful Tools for Guestfish class."""
__slots__ = ('params', )
def __init__(self, params):
"""
Init a persistent guestfish shellsession.
"""
self.params = params
disk_img = params.get("disk_img")
ro_mode = bool(params.get("gf_ro_mode", False))
libvirt_domain = params.get("libvirt_domain")
inspector = bool(params.get("gf_inspector", False))
mount_options = params.get("mount_options")
run_mode = params.get("gf_run_mode", "interactive")
super(GuestfishTools, self).__init__(disk_img, ro_mode,
libvirt_domain, inspector,
mount_options=mount_options,
run_mode=run_mode)
def get_root(self):
"""
Get root filesystem w/ guestfish
"""
getroot_result = self.inspect_os()
roots_list = getroot_result.stdout.splitlines()
if getroot_result.exit_status or not len(roots_list):
logging.error("Get root failed:%s", getroot_result)
return (False, getroot_result)
return (True, roots_list[0].strip())
def analyse_release(self):
"""
Analyse /etc/redhat-release
"""
logging.info("Analysing /etc/redhat-release...")
release_result = self.cat("/etc/redhat-release")
logging.debug(release_result)
if release_result.exit_status:
logging.error("Cat /etc/redhat-release failed")
return (False, release_result)
release_type = {'rhel': "Red Hat Enterprise Linux",
'fedora': "Fedora"}
for key in release_type:
if re.search(release_type[key], release_result.stdout):
return (True, key)
def write_file(self, path, content):
"""
Create a new file to vm with guestfish
"""
logging.info("Creating file %s in vm...", path)
write_result = self.write(path, content)
if write_result.exit_status:
logging.error("Create '%s' with content '%s' failed:%s",
path, content, write_result)
return False
return True
def get_partitions_info(self, device="/dev/sda"):
"""
Get disk partition's information.
"""
list_result = self.part_list(device)
if list_result.exit_status:
logging.error("List partition info failed:%s", list_result)
return (False, list_result)
list_lines = list_result.stdout.splitlines()
# This dict is a struct like this: {key:{a dict}, key:{a dict}}
partitions = {}
# This dict is a struct of normal dict, for temp value of a partition
part_details = {}
index = -1
for line in list_lines:
# Init for a partition
if re.search("\[\d\]\s+=", line):
index = line.split("]")[0].split("[")[-1]
part_details = {}
partitions[index] = part_details
if re.search("part_num", line):
part_num = int(line.split(":")[-1].strip())
part_details['num'] = part_num
elif re.search("part_start", line):
part_start = int(line.split(":")[-1].strip())
part_details['start'] = part_start
elif re.search("part_end", line):
part_end = int(line.split(":")[-1].strip())
part_details['end'] = part_end
elif re.search("part_size", line):
part_size = int(line.split(":")[-1].strip())
part_details['size'] = part_size
if index != -1:
partitions[index] = part_details
logging.info(partitions)
return (True, partitions)
def get_part_size(self, part_num):
status, partitions = self.get_partitions_info()
if status is False:
return None
for partition in partitions.values():
if str(partition.get("num")) == str(part_num):
return partition.get("size")
def create_fs(self):
"""
Create filesystem of disk
Choose lvm or physical partition and create fs on it
"""
image_path = self.params.get("image_path")
self.add_drive(image_path)
self.run()
partition_type = self.params.get("partition_type")
fs_type = self.params.get("fs_type", "ext3")
image_size = self.params.get("image_size", "6G")
with_blocksize = self.params.get("with_blocksize")
blocksize = self.params.get("blocksize")
tarball_path = self.params.get("tarball_path")
if partition_type not in ['lvm', 'physical']:
return (False, "partition_type is incorrect, support [physical,lvm]")
if partition_type == "lvm":
logging.info("create lvm partition...")
pv_name = self.params.get("pv_name", "/dev/sdb")
vg_name = self.params.get("vg_name", "vol_test")
lv_name = self.params.get("lv_name", "vol_file")
mount_point = "/dev/%s/%s" % (vg_name, lv_name)
if 'G' in image_size:
lv_size = int(image_size.replace('G', '')) * 1000
else:
lv_size = int(image_size.replace('M', '')) - 10
self.pvcreate(pv_name)
self.vgcreate(vg_name, pv_name)
self.lvcreate(lv_name, vg_name, lv_size)
elif partition_type == "physical":
logging.info("create physical partition...")
pv_name = self.params.get("pv_name", "/dev/sdb")
mount_point = pv_name + "1"
self.part_disk(pv_name, "mbr")
self.part_list(pv_name)
self.params["mount_point"] = mount_point
if with_blocksize == "yes" and fs_type != "btrfs" and fs_type != "no_fs":
if blocksize:
self.mkfs_opts(fs_type, mount_point, "blocksize:%s" % (blocksize))
self.vfs_type(mount_point)
else:
logging.error("with_blocksize is set but blocksize not given")
self.umount_all()
self.sync()
return (False, "with_blocksize is set but blocksize not given")
elif fs_type != "no_fs":
self.mkfs(fs_type, mount_point)
self.vfs_type(mount_point)
if tarball_path:
self.mount_options("noatime", mount_point, '/')
self.tar_in_opts(tarball_path, '/', 'gzip')
self.ll('/')
self.umount_all()
self.sync()
return (True, "create_fs successfully")
def create_msdos_part(self, device, start="1", end="-1"):
"""
Create a msdos partition in given device.
Default partition section is whole disk(1~-1).
And return its part name if part add succeed.
"""
logging.info("Creating a new partition on %s...", device)
init_result = self.part_init(device, "msdos")
if init_result.exit_status:
logging.error("Init disk failed:%s", init_result)
return (False, init_result)
add_result = self.part_add(device, "p", start, end)
if add_result.exit_status:
logging.error("Add a partition failed:%s", add_result)
return (False, add_result)
# Get latest created part num to return
status, partitions = self.get_partitions_info(device)
if status is False:
return (False, partitions)
part_num = -1
for partition in partitions.values():
cur_num = partition.get("num")
if cur_num > part_num:
part_num = cur_num
if part_num == -1:
return (False, partitions)
return (True, part_num)
def create_whole_disk_msdos_part(self, device):
"""
Create only one msdos partition in given device.
And return its part name if part add succeed.
"""
logging.info("Creating one partition of whole %s...", device)
init_result = self.part_init(device, "msdos")
if init_result.exit_status:
logging.error("Init disk failed:%s", init_result)
return (False, init_result)
disk_result = self.part_disk(device, "msdos")
if disk_result.exit_status:
logging.error("Init disk failed:%s", disk_result)
return (False, disk_result)
# Get latest created part num to return
status, partitions = self.get_partitions_info(device)
if status is False:
return (False, partitions)
part_num = -1
for partition in partitions.values():
cur_num = partition.get("num")
if cur_num > part_num:
part_num = cur_num
if part_num == -1:
return (False, partitions)
return (True, part_num)
def get_bootable_part(self, device="/dev/sda"):
status, partitions = self.get_partitions_info(device)
if status is False:
return (False, partitions)
for partition in partitions.values():
num = partition.get("num")
ba_result = self.part_get_bootable(device, num)
if ba_result.stdout.strip() == "true":
return (True, "%s%s" % (device, num))
return (False, partitions)
def get_mbr_id(self, device="/dev/sda"):
status, partitions = self.get_partitions_info(device)
if status is False:
return (False, partitions)
for partition in partitions.values():
num = partition.get("num")
mbr_id_result = self.part_get_mbr_id(device, num)
if mbr_id_result.exit_status == 0:
return (True, mbr_id_result.stdout.strip())
return (False, partitions)
def get_part_type(self, device="/dev/sda"):
part_type_result = self.part_get_parttype(device)
if part_type_result.exit_status:
return (False, part_type_result)
return (True, part_type_result.stdout.strip())
def get_md5(self, path):
"""
Get files md5 value.
"""
logging.info("Computing %s's md5...", path)
md5_result = self.checksum("md5", path)
if md5_result.exit_status:
logging.error("Check %s's md5 failed:%s", path, md5_result)
return (False, md5_result)
return (True, md5_result.stdout.strip())
def reset_interface(self, iface_mac):
"""
Check interface through guestfish. Fix mac if necessary.
"""
# disk or domain
vm_ref = self.params.get("libvirt_domain")
if not vm_ref:
vm_ref = self.params.get("disk_img")
if not vm_ref:
logging.error("No object to edit.")
return False
logging.info("Resetting %s's mac to %s", vm_ref, iface_mac)
# Fix file which includes interface devices information
# Default is /etc/udev/rules.d/70-persistent-net.rules
devices_file = "/etc/udev/rules.d/70-persistent-net.rules"
# Set file which binds mac and IP-address
ifcfg_files = ["/etc/sysconfig/network-scripts/ifcfg-p1p1",
"/etc/sysconfig/network-scripts/ifcfg-eth0"]
# Fix devices file
mac_regex = (r"\w.:\w.:\w.:\w.:\w.:\w.")
edit_expr = "s/%s/%s/g" % (mac_regex, iface_mac)
file_ret = self.is_file(devices_file)
if file_ret.stdout.strip() == "true":
self.close_session()
try:
result = lgf.virt_edit_cmd(vm_ref, devices_file,
expr=edit_expr, debug=True,
ignore_status=True)
if result.exit_status:
logging.error("Edit %s failed:%s", devices_file, result)
return False
except lgf.LibguestfsCmdError, detail:
logging.error("Edit %s failed:%s", devices_file, detail)
return False
self.new_session()
# Just to keep output looking better
self.is_ready()
logging.debug(self.cat(devices_file))
# Fix interface file
for ifcfg_file in ifcfg_files:
file_ret = self.is_file(ifcfg_file)
if file_ret.stdout.strip() == "false":
continue
self.close_session()
self.params['ifcfg_file'] = ifcfg_file
try:
result = lgf.virt_edit_cmd(vm_ref, ifcfg_file,
expr=edit_expr, debug=True,
ignore_status=True)
if result.exit_status:
logging.error("Edit %s failed:%s", ifcfg_file, result)
return False
except lgf.LibguestfsCmdError, detail:
logging.error("Edit %s failed:%s", ifcfg_file, detail)
return False
self.new_session()
# Just to keep output looking better
self.is_ready()
logging.debug(self.cat(ifcfg_file))
return True
def copy_ifcfg_back(self):
# This function must be called after reset_interface()
ifcfg_file = self.params.get("ifcfg_file")
bak_file = "%s.bak" % ifcfg_file
if ifcfg_file:
self.is_ready()
is_need = self.is_file(ifcfg_file)
if is_need.stdout.strip() == "false":
cp_result = self.cp(bak_file, ifcfg_file)
if cp_result.exit_status:
logging.warn("Recover ifcfg file failed:%s", cp_result)
return False
return True
| gpl-2.0 |
VigTech/Vigtech-Services | env/lib/python2.7/site-packages/django/middleware/clickjacking.py | 185 | 1994 |
"""
Clickjacking Protection Middleware.
This module provides a middleware that implements protection against a
malicious site loading resources from your site in a hidden frame.
"""
from django.conf import settings
class XFrameOptionsMiddleware(object):
"""
Middleware that sets the X-Frame-Options HTTP header in HTTP responses.
Does not set the header if it's already set or if the response contains
a xframe_options_exempt value set to True.
By default, sets the X-Frame-Options header to 'SAMEORIGIN', meaning the
response can only be loaded on a frame within the same site. To prevent the
response from being loaded in a frame in any site, set X_FRAME_OPTIONS in
your project's Django settings to 'DENY'.
Note: older browsers will quietly ignore this header, thus other
clickjacking protection techniques should be used if protection in those
browsers is required.
http://en.wikipedia.org/wiki/Clickjacking#Server_and_client
"""
def process_response(self, request, response):
# Don't set it if it's already in the response
if response.get('X-Frame-Options', None) is not None:
return response
# Don't set it if they used @xframe_options_exempt
if getattr(response, 'xframe_options_exempt', False):
return response
response['X-Frame-Options'] = self.get_xframe_options_value(request,
response)
return response
def get_xframe_options_value(self, request, response):
"""
Gets the value to set for the X_FRAME_OPTIONS header.
By default this uses the value from the X_FRAME_OPTIONS Django
settings. If not found in settings, defaults to 'SAMEORIGIN'.
This method can be overridden if needed, allowing it to vary based on
the request or response.
"""
return getattr(settings, 'X_FRAME_OPTIONS', 'SAMEORIGIN').upper()
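# Hedged illustration: a standalone sketch of the default behaviour using a stub response
# object. settings.configure() stands in for a real project's settings module, which would
# normally define X_FRAME_OPTIONS (e.g. 'DENY'); the stub class is purely hypothetical.
if __name__ == '__main__':
    settings.configure()  # no explicit X_FRAME_OPTIONS, so the default applies
    class StubResponse(dict):
        xframe_options_exempt = False
    response = XFrameOptionsMiddleware().process_response(None, StubResponse())
    print(response['X-Frame-Options'])  # SAMEORIGIN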
| lgpl-3.0 |
xiandiancloud/ji | lms/djangoapps/open_ended_grading/views.py | 16 | 13661 |
import logging
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from courseware.courses import get_course_with_access
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError
import json
from student.models import unique_id_for_user
import open_ended_notifications
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import search
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.exceptions import NoPathToItem
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.utils.translation import ugettext as _
from open_ended_grading.utils import (
STAFF_ERROR_MESSAGE, StudentProblemList, generate_problem_url, create_controller_query_service
)
log = logging.getLogger(__name__)
def _reverse_with_slash(url_name, course_key):
"""
Reverses the URL given the name and the course id, and then adds a trailing slash if
it does not exist yet.
@param url_name: The name of the url (eg 'staff_grading').
@param course_key: The key of the course object (eg course.id).
@returns: The reversed url with a trailing slash.
"""
ajax_url = _reverse_without_slash(url_name, course_key)
if not ajax_url.endswith('/'):
ajax_url += '/'
return ajax_url
def _reverse_without_slash(url_name, course_key):
course_id = course_key.to_deprecated_string()
ajax_url = reverse(url_name, kwargs={'course_id': course_id})
return ajax_url
DESCRIPTION_DICT = {
'Peer Grading': _("View all problems that require peer assessment in this particular course."),
'Staff Grading': _("View ungraded submissions submitted by students for the open ended problems in the course."),
'Problems you have submitted': _("View open ended problems that you have previously submitted for grading."),
'Flagged Submissions': _("View submissions that have been flagged by students as inappropriate."),
}
ALERT_DICT = {
'Peer Grading': _("New submissions to grade"),
'Staff Grading': _("New submissions to grade"),
'Problems you have submitted': _("New grades have been returned"),
'Flagged Submissions': _("Submissions have been flagged for review"),
}
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def staff_grading(request, course_id):
"""
Show the instructor grading interface.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'staff', course_key)
ajax_url = _reverse_with_slash('staff_grading', course_key)
return render_to_response('instructor/staff_grading.html', {
'course': course,
'course_id': course_id,
'ajax_url': ajax_url,
# Checked above
'staff_access': True, })
def find_peer_grading_module(course):
"""
Given a course, finds the first peer grading module in it.
@param course: A course object.
@return: boolean found_module, string problem_url
"""
# Reverse the base course url.
base_course_url = reverse('courses')
found_module = False
problem_url = ""
# Get the peer grading modules currently in the course. Explicitly specify the course id to avoid issues with different runs.
items = modulestore().get_items(course.id, qualifiers={'category': 'peergrading'})
# See if any of the modules are centralized modules (ie display info from multiple problems)
items = [i for i in items if not getattr(i, "use_for_single_location", True)]
# Loop through all potential peer grading modules, and find the first one that has a path to it.
for item in items:
# Generate a url for the first module and redirect the user to it.
try:
problem_url_parts = search.path_to_location(modulestore(), item.location)
except NoPathToItem:
# In the case of nopathtoitem, the peer grading module that was found is in an invalid state, and
# can no longer be accessed. Log an informational message, but this will not impact normal behavior.
log.info(u"Invalid peer grading module location %s in course %s. This module may need to be removed.", item.location, course.id)
continue
problem_url = generate_problem_url(problem_url_parts, base_course_url)
found_module = True
return found_module, problem_url
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def peer_grading(request, course_id):
'''
When a student clicks on the "peer grading" button in the open ended interface, link them to a peer grading
xmodule in the course.
'''
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
#Get the current course
course = get_course_with_access(request.user, 'load', course_key)
found_module, problem_url = find_peer_grading_module(course)
if not found_module:
error_message = _("""
Error initializing peer grading.
A peer grading module that would allow you to grade others has not yet been created in the courseware.
Please check back later.
""")
log.exception(error_message + u" Current course is: {0}".format(course_id))
return HttpResponse(error_message)
return HttpResponseRedirect(problem_url)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def student_problem_list(request, course_id):
"""
Show a student a list of the problems they have attempted.
Fetch the list from the grading controller server and append some data.
@param request: The request object for this view.
@param course_id: The id of the course to get the problem list for.
@return: Renders an HTML problem list table.
"""
assert isinstance(course_id, basestring)
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
# Load the course. Don't catch any errors here, as we want them to be loud.
course = get_course_with_access(request.user, 'load', course_key)
# The anonymous student id is needed for communication with ORA.
student_id = unique_id_for_user(request.user)
base_course_url = reverse('courses')
error_text = ""
student_problem_list = StudentProblemList(course_key, student_id)
# Get the problem list from ORA.
success = student_problem_list.fetch_from_grading_service()
# If we fetched the problem list properly, add in additional problem data.
if success:
# Add in links to problems.
valid_problems = student_problem_list.add_problem_data(base_course_url)
else:
# Get an error message to show to the student.
valid_problems = []
error_text = student_problem_list.error_text
ajax_url = _reverse_with_slash('open_ended_problems', course_key)
context = {
'course': course,
'course_id': course_key.to_deprecated_string(),
'ajax_url': ajax_url,
'success': success,
'problem_list': valid_problems,
'error_text': error_text,
# Checked above
'staff_access': False,
}
return render_to_response('open_ended_problems/open_ended_problems.html', context)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def flagged_problem_list(request, course_id):
'''
Show a list of submissions that students have flagged as inappropriate, for staff review.
'''
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'staff', course_key)
# call problem list service
success = False
error_text = ""
problem_list = []
# Make a service that can query edX ORA.
controller_qs = create_controller_query_service()
try:
problem_list_dict = controller_qs.get_flagged_problem_list(course_key)
success = problem_list_dict['success']
if 'error' in problem_list_dict:
error_text = problem_list_dict['error']
problem_list = []
else:
problem_list = problem_list_dict['flagged_submissions']
except GradingServiceError:
#This is a staff_facing_error
error_text = STAFF_ERROR_MESSAGE
#This is a dev_facing_error
log.error("Could not get flagged problem list from external grading service for open ended.")
success = False
# catch error if the json load fails
except ValueError:
#This is a staff_facing_error
error_text = STAFF_ERROR_MESSAGE
#This is a dev_facing_error
log.error("Could not parse problem list from external grading service response.")
success = False
ajax_url = _reverse_with_slash('open_ended_flagged_problems', course_key)
context = {
'course': course,
'course_id': course_id,
'ajax_url': ajax_url,
'success': success,
'problem_list': problem_list,
'error_text': error_text,
# Checked above
'staff_access': True,
}
return render_to_response('open_ended_problems/open_ended_flagged_problems.html', context)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def combined_notifications(request, course_id):
"""
Gets combined notifications from the grading controller and displays them
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
user = request.user
notifications = open_ended_notifications.combined_notifications(course, user)
response = notifications['response']
notification_tuples = open_ended_notifications.NOTIFICATION_TYPES
notification_list = []
for response_num in xrange(0, len(notification_tuples)):
tag = notification_tuples[response_num][0]
if tag in response:
url_name = notification_tuples[response_num][1]
human_name = notification_tuples[response_num][2]
url = _reverse_without_slash(url_name, course_key)
has_img = response[tag]
# check to make sure we have descriptions and alert messages
if human_name in DESCRIPTION_DICT:
description = DESCRIPTION_DICT[human_name]
else:
description = ""
if human_name in ALERT_DICT:
alert_message = ALERT_DICT[human_name]
else:
alert_message = ""
notification_item = {
'url': url,
'name': human_name,
'alert': has_img,
'description': description,
'alert_message': alert_message
}
#The open ended panel will need to link the "peer grading" button in the panel to a peer grading
#xmodule defined in the course. This checks to see if the human name of the server notification
#that we are currently processing is "peer grading". If it is, it looks for a peer grading
#module in the course. If none exists, it removes the peer grading item from the panel.
if human_name == "Peer Grading":
found_module, problem_url = find_peer_grading_module(course)
if found_module:
notification_list.append(notification_item)
else:
notification_list.append(notification_item)
ajax_url = _reverse_with_slash('open_ended_notifications', course_key)
combined_dict = {
'error_text': "",
'notification_list': notification_list,
'course': course,
'success': True,
'ajax_url': ajax_url,
}
return render_to_response('open_ended_problems/combined_notifications.html', combined_dict)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def take_action_on_flags(request, course_id):
"""
Takes action on student flagged submissions.
Currently, only the unflag and ban actions are supported.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
if request.method != 'POST':
raise Http404
required = ['submission_id', 'action_type', 'student_id']
for key in required:
if key not in request.POST:
error_message = u'Missing key {0} from submission. Please reload and try again.'.format(key)
response = {
'success': False,
'error': STAFF_ERROR_MESSAGE + error_message
}
return HttpResponse(json.dumps(response), mimetype="application/json")
p = request.POST
submission_id = p['submission_id']
action_type = p['action_type']
student_id = p['student_id']
student_id = student_id.strip(' \t\n\r')
submission_id = submission_id.strip(' \t\n\r')
action_type = action_type.lower().strip(' \t\n\r')
# Make a service that can query edX ORA.
controller_qs = create_controller_query_service()
try:
response = controller_qs.take_action_on_flags(course_key, student_id, submission_id, action_type)
return HttpResponse(json.dumps(response), mimetype="application/json")
except GradingServiceError:
log.exception(
u"Error taking action on flagged peer grading submissions, "
u"submission_id: {0}, action_type: {1}, grader_id: {2}".format(
submission_id, action_type, student_id)
)
response = {
'success': False,
'error': STAFF_ERROR_MESSAGE
}
return HttpResponse(json.dumps(response), mimetype="application/json")
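# Illustrative note (added; not part of the original view): take_action_on_flags expects a POST
# containing the keys listed in `required` above; a hypothetical AJAX payload would look like
#     {'submission_id': '42', 'action_type': 'unflag', 'student_id': '<anonymous ora id>'}
# where 'unflag' and 'ban' are the supported action_type values.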
|
agpl-3.0
|
rhinstaller/system-config-kickstart
|
src/savedialog.py
|
1
|
3163
|
#
# Chris Lumens <[email protected]>
# Brent Fox <[email protected]>
# Tammy Fox <[email protected]>
#
# Copyright (C) 2000-2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2 or, at your option, any later version. This
# program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
import gtk
import gtk.glade
import savefile
import signal
import kickstartGui
from pykickstart.errors import *
##
## I18N
##
import gettext
gtk.glade.bindtextdomain("system-config-kickstart")
_ = lambda x: gettext.ldgettext("system-config-kickstart", x)
class saveDialog:
def destroy(self, args):
self.dialog.hide()
return True
def __init__ (self, buf, xml):
self.xml = xml
self.buf = buf
self.dialog = self.xml.get_widget("save_dialog")
self.dialog.connect("delete-event", self.hide)
self.dialog.set_modal(True)
toplevel = self.xml.get_widget("main_window")
self.dialog.set_transient_for(toplevel)
self.save_ok_button = self.xml.get_widget("save_ok_button")
self.save_cancel_button = self.xml.get_widget("save_cancel_button")
self.dialog.set_current_name("ks.cfg")
self.dialog.filePath= ""
self.dialog.connect ("destroy", self.destroy)
self.save_ok_button.connect("clicked", self.saveFile)
self.save_cancel_button.connect("clicked", self.hide)
self.dialog.set_icon(kickstartGui.iconPixbuf)
self.dialog.show_all()
#save file
def saveFile(self, *args):
self.dialog.filePath = self.dialog.get_filename()
if not self.dialog.filePath:
return
try:
ksFile = open(self.dialog.filePath, "w")
except (IOError, KickstartError), e:
msg = _("The following error occurred while saving the "
"kickstart config %s: %s") % (self.dialog.filePath, e)
dlg = gtk.MessageDialog (None, 0, gtk.MESSAGE_ERROR,
gtk.BUTTONS_OK, msg)
dlg.set_title(_("Error Saving Kickstart Config"))
dlg.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
dlg.set_modal(True)
dlg.run()
dlg.destroy()
return
ksFile.write(self.buf)
ksFile.close()
self.dialog.hide()
def hide(self, *args):
self.dialog.hide()
return True
|
gpl-2.0
|
forge33/CouchPotatoServer
|
libs/guessit/transfo/__init__.py
|
94
|
4117
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import base_text_type, Guess
from guessit.patterns import canonical_form
from guessit.textutils import clean_string
import logging
log = logging.getLogger(__name__)
def found_property(node, name, confidence):
node.guess = Guess({name: node.clean_value}, confidence=confidence, raw=node.value)
log.debug('Found with confidence %.2f: %s' % (confidence, node.guess))
def format_guess(guess):
"""Format all the found values to their natural type.
For instance, a year would be stored as an int value, etc...
Note that this modifies the dictionary given as input.
"""
for prop, value in guess.items():
if prop in ('season', 'episodeNumber', 'year', 'cdNumber',
'cdNumberTotal', 'bonusNumber', 'filmNumber'):
guess[prop] = int(guess[prop])
elif isinstance(value, base_text_type):
if prop in ('edition',):
value = clean_string(value)
guess[prop] = canonical_form(value).replace('\\', '')
return guess
def find_and_split_node(node, strategy, logger):
string = ' %s ' % node.value # add sentinels
for matcher, confidence, args, kwargs in strategy:
all_args = [string]
if getattr(matcher, 'use_node', False):
all_args.append(node)
if args:
all_args.append(args)
if kwargs:
result, span = matcher(*all_args, **kwargs)
else:
result, span = matcher(*all_args)
if result:
# readjust span to compensate for sentinels
span = (span[0] - 1, span[1] - 1)
if isinstance(result, Guess):
if confidence is None:
confidence = result.confidence(list(result.keys())[0])
else:
if confidence is None:
confidence = 1.0
guess = format_guess(Guess(result, confidence=confidence, raw=string[span[0] + 1:span[1] + 1]))
msg = 'Found with confidence %.2f: %s' % (confidence, guess)
(logger or log).debug(msg)
node.partition(span)
absolute_span = (span[0] + node.offset, span[1] + node.offset)
for child in node.children:
if child.span == absolute_span:
child.guess = guess
else:
find_and_split_node(child, strategy, logger)
return
class SingleNodeGuesser(object):
def __init__(self, guess_func, confidence, logger, *args, **kwargs):
self.guess_func = guess_func
self.confidence = confidence
self.logger = logger
self.args = args
self.kwargs = kwargs
def process(self, mtree):
# strategy is a list of pairs (guesser, confidence)
# - if the guesser returns a guessit.Guess and confidence is specified,
# it will override it, otherwise it will leave the guess confidence
# - if the guesser returns a simple dict as a guess and confidence is
# specified, it will use it, or 1.0 otherwise
strategy = [ (self.guess_func, self.confidence, self.args, self.kwargs) ]
for node in mtree.unidentified_leaves():
find_and_split_node(node, strategy, self.logger)
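# Minimal illustrative sketch (added; not part of the original module): exercising
# format_guess() defined above. The property names are taken from the tuple that
# format_guess checks; the Guess construction mirrors the calls made elsewhere in this file.
if __name__ == '__main__':
    _demo = format_guess(Guess({'season': '2', 'episodeNumber': '05'},
                               confidence=0.9, raw='2x05'))
    assert _demo['season'] == 2 and _demo['episodeNumber'] == 5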
|
gpl-3.0
|
QISKit/qiskit-sdk-py
|
test/python/transpiler/test_barrier_before_final_measurements.py
|
1
|
14581
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the BarrierBeforeFinalMeasurements pass"""
import unittest
from qiskit.transpiler.passes import BarrierBeforeFinalMeasurements
from qiskit.converters import circuit_to_dag
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister
from qiskit.test import QiskitTestCase
class TestBarrierBeforeFinalMeasurements(QiskitTestCase):
"""Tests the BarrierBeforeFinalMeasurements pass."""
def test_single_measure(self):
""" A single measurement at the end
|
q:--[m]-- q:--|-[m]---
| -> | |
c:---.--- c:-----.---
"""
qr = QuantumRegister(1, 'q')
cr = ClassicalRegister(1, 'c')
circuit = QuantumCircuit(qr, cr)
circuit.measure(qr, cr)
expected = QuantumCircuit(qr, cr)
expected.barrier(qr)
expected.measure(qr, cr)
pass_ = BarrierBeforeFinalMeasurements()
result = pass_.run(circuit_to_dag(circuit))
self.assertEqual(result, circuit_to_dag(expected))
def test_ignore_single_measure(self):
"""Ignore single measurement because it is not at the end
q:--[m]-[H]- q:--[m]-[H]-
| -> |
c:---.------ c:---.------
"""
qr = QuantumRegister(1, 'q')
cr = ClassicalRegister(1, 'c')
circuit = QuantumCircuit(qr, cr)
circuit.measure(qr, cr)
circuit.h(qr[0])
expected = QuantumCircuit(qr, cr)
expected.measure(qr, cr)
expected.h(qr[0])
pass_ = BarrierBeforeFinalMeasurements()
result = pass_.run(circuit_to_dag(circuit))
self.assertEqual(result, circuit_to_dag(expected))
def test_single_measure_mix(self):
"""Two measurements, but only one is at the end
|
q0:--[m]--[H]--[m]-- q0:--[m]--[H]--|-[m]---
| | -> | | |
c:---.---------.--- c:---.-----------.---
"""
qr = QuantumRegister(1, 'q')
cr = ClassicalRegister(1, 'c')
circuit = QuantumCircuit(qr, cr)
circuit.measure(qr, cr)
circuit.h(qr)
circuit.measure(qr, cr)
expected = QuantumCircuit(qr, cr)
expected.measure(qr, cr)
expected.h(qr)
expected.barrier(qr)
expected.measure(qr, cr)
pass_ = BarrierBeforeFinalMeasurements()
result = pass_.run(circuit_to_dag(circuit))
self.assertEqual(result, circuit_to_dag(expected))
def test_two_qregs(self):
"""Two measurements in different qregs to different cregs
|
q0:--[H]--[m]------ q0:--[H]--|--[m]------
| | |
q1:--------|--[m]-- -> q1:-------|---|--[m]--
| | | | |
c0:--------.---|--- c0:----------.---|---
| |
c1:------------.--- c1:--------------.---
"""
qr0 = QuantumRegister(1, 'q0')
qr1 = QuantumRegister(1, 'q1')
cr0 = ClassicalRegister(1, 'c0')
cr1 = ClassicalRegister(1, 'c1')
circuit = QuantumCircuit(qr0, qr1, cr0, cr1)
circuit.h(qr0)
circuit.measure(qr0, cr0)
circuit.measure(qr1, cr1)
expected = QuantumCircuit(qr0, qr1, cr0, cr1)
expected.h(qr0)
expected.barrier(qr0, qr1)
expected.measure(qr0, cr0)
expected.measure(qr1, cr1)
pass_ = BarrierBeforeFinalMeasurements()
result = pass_.run(circuit_to_dag(circuit))
self.assertEqual(result, circuit_to_dag(expected))
def test_two_qregs_to_a_single_creg(self):
"""Two measurements in different qregs to the same creg
|
q0:--[H]--[m]------ q0:--[H]--|--[m]------
| | |
q1:--------|--[m]-- -> q1:-------|---|--[m]--
| | | | |
c0:--------.---|--- c0:-----------.---|---
------------.--- ---------------.---
"""
qr0 = QuantumRegister(1, 'q0')
qr1 = QuantumRegister(1, 'q1')
cr0 = ClassicalRegister(2, 'c0')
circuit = QuantumCircuit(qr0, qr1, cr0)
circuit.h(qr0)
circuit.measure(qr0, cr0[0])
circuit.measure(qr1, cr0[1])
expected = QuantumCircuit(qr0, qr1, cr0)
expected.h(qr0)
expected.barrier(qr0, qr1)
expected.measure(qr0, cr0[0])
expected.measure(qr1, cr0[1])
pass_ = BarrierBeforeFinalMeasurements()
result = pass_.run(circuit_to_dag(circuit))
self.assertEqual(result, circuit_to_dag(expected))
def test_preserve_measure_for_conditional(self):
"""Test barrier is inserted after any measurements used for conditionals
q0:--[H]--[m]------------ q0:--[H]--[m]---------------
| |
q1:--------|--[ z]--[m]-- -> q1:--------|--[ z]--|--[m]--
| | | | | |
c0:--------.--[=1]---|--- c0:--------.--[=1]------|---
| |
c1:------------------.--- c1:---------------------.---
"""
qr0 = QuantumRegister(1, 'q0')
qr1 = QuantumRegister(1, 'q1')
cr0 = ClassicalRegister(1, 'c0')
cr1 = ClassicalRegister(1, 'c1')
circuit = QuantumCircuit(qr0, qr1, cr0, cr1)
circuit.h(qr0)
circuit.measure(qr0, cr0)
circuit.z(qr1).c_if(cr0, 1)
circuit.measure(qr1, cr1)
expected = QuantumCircuit(qr0, qr1, cr0, cr1)
expected.h(qr0)
expected.measure(qr0, cr0)
expected.z(qr1).c_if(cr0, 1)
expected.barrier(qr1)
expected.measure(qr1, cr1)
pass_ = BarrierBeforeFinalMeasurements()
result = pass_.run(circuit_to_dag(circuit))
self.assertEqual(result, circuit_to_dag(expected))
class TestBarrierBeforeMeasurementsWhenABarrierIsAlreadyThere(QiskitTestCase):
"""Tests the BarrierBeforeFinalMeasurements pass when there is a barrier already"""
def test_handle_redundancy(self):
"""The pass is idempotent
| |
q:--|-[m]-- q:--|-[m]---
| | -> | |
c:-----.--- c:-----.---
"""
qr = QuantumRegister(1, 'q')
cr = ClassicalRegister(1, 'c')
circuit = QuantumCircuit(qr, cr)
circuit.barrier(qr)
circuit.measure(qr, cr)
expected = QuantumCircuit(qr, cr)
expected.barrier(qr)
expected.measure(qr, cr)
pass_ = BarrierBeforeFinalMeasurements()
result = pass_.run(circuit_to_dag(circuit))
self.assertEqual(result, circuit_to_dag(expected))
def test_remove_barrier_in_different_qregs(self):
"""Two measurements in different qregs to the same creg
q0:--|--[m]------ q0:---|--[m]------
| | |
q1:--|---|--[m]-- -> q1:---|---|--[m]--
| | | |
c0:------.---|--- c0:-------.---|---
----------.--- -----------.---
"""
qr0 = QuantumRegister(1, 'q0')
qr1 = QuantumRegister(1, 'q1')
cr0 = ClassicalRegister(2, 'c0')
circuit = QuantumCircuit(qr0, qr1, cr0)
circuit.barrier(qr0)
circuit.barrier(qr1)
circuit.measure(qr0, cr0[0])
circuit.measure(qr1, cr0[1])
expected = QuantumCircuit(qr0, qr1, cr0)
expected.barrier(qr0, qr1)
expected.measure(qr0, cr0[0])
expected.measure(qr1, cr0[1])
pass_ = BarrierBeforeFinalMeasurements()
result = pass_.run(circuit_to_dag(circuit))
self.assertEqual(result, circuit_to_dag(expected))
def test_preserve_barriers_for_measurement_ordering(self):
"""If the circuit has a barrier to enforce a measurement order,
preserve it in the output.
q:---[m]--|------- q:---|--[m]--|-------
----|---|--[m]-- -> ---|---|---|--[m]--
| | | |
c:----.-------|--- c:-------.-------|---
------------.--- ---------------.---
"""
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(2, 'c')
circuit = QuantumCircuit(qr, cr)
circuit.measure(qr[0], cr[0])
circuit.barrier(qr)
circuit.measure(qr[1], cr[1])
expected = QuantumCircuit(qr, cr)
expected.barrier(qr)
expected.measure(qr[0], cr[0])
expected.barrier(qr)
expected.measure(qr[1], cr[1])
pass_ = BarrierBeforeFinalMeasurements()
result = pass_.run(circuit_to_dag(circuit))
self.assertEqual(result, circuit_to_dag(expected))
def test_measures_followed_by_barriers_should_be_final(self):
"""If a measurement is followed only by a barrier,
insert the barrier before it.
q:---[H]--|--[m]--|------- q:---[H]--|--[m]-|-------
---[H]--|---|---|--[m]-- -> ---[H]--|---|--|--[m]--
| | | |
c:------------.-------|--- c:------------.------|---
--------------------.--- -------------------.---
"""
qr = QuantumRegister(2, 'q')
cr = ClassicalRegister(2, 'c')
circuit = QuantumCircuit(qr, cr)
circuit.h(qr)
circuit.barrier(qr)
circuit.measure(qr[0], cr[0])
circuit.barrier(qr)
circuit.measure(qr[1], cr[1])
expected = QuantumCircuit(qr, cr)
expected.h(qr)
expected.barrier(qr)
expected.measure(qr[0], cr[0])
expected.barrier(qr)
expected.measure(qr[1], cr[1])
pass_ = BarrierBeforeFinalMeasurements()
result = pass_.run(circuit_to_dag(circuit))
self.assertEqual(result, circuit_to_dag(expected))
def test_should_merge_with_smaller_duplicate_barrier(self):
"""If an equivalent barrier exists covering a subset of the qubits
covered by the new barrier, it should be replaced.
q:---|--[m]------------- q:---|--[m]-------------
---|---|---[m]-------- -> ---|---|---[m]--------
-------|----|---[m]--- ---|---|----|---[m]---
| | | | | |
c:-------.----|----|---- c:-------.----|----|----
------------.----|---- ------------.----|----
-----------------.---- -----------------.----
"""
qr = QuantumRegister(3, 'q')
cr = ClassicalRegister(3, 'c')
circuit = QuantumCircuit(qr, cr)
circuit.barrier(qr[0], qr[1])
circuit.measure(qr, cr)
expected = QuantumCircuit(qr, cr)
expected.barrier(qr)
expected.measure(qr, cr)
pass_ = BarrierBeforeFinalMeasurements()
result = pass_.run(circuit_to_dag(circuit))
self.assertEqual(result, circuit_to_dag(expected))
def test_should_merge_with_larger_duplicate_barrier(self):
"""If a barrier exists and is stronger than the barrier to be inserted,
preserve the existing barrier and do not insert a new barrier.
q:---|--[m]--|------- q:---|--[m]-|-------
---|---|---|--[m]-- -> ---|---|--|--[m]--
---|---|---|---|--- ---|---|--|---|---
| | | |
c:-------.-------|--- c:-------.------|---
---------------.--- --------------.---
------------------- ------------------
"""
qr = QuantumRegister(3, 'q')
cr = ClassicalRegister(3, 'c')
circuit = QuantumCircuit(qr, cr)
circuit.barrier(qr)
circuit.measure(qr[0], cr[0])
circuit.barrier(qr)
circuit.measure(qr[1], cr[1])
expected = circuit
pass_ = BarrierBeforeFinalMeasurements()
result = pass_.run(circuit_to_dag(circuit))
self.assertEqual(result, circuit_to_dag(expected))
def test_barrier_doesnt_reorder_gates(self):
""" A barrier should not allow the reordering of gates, as pointed out in #2102
q:--[u1(0)]-----------[m]--------- q:--[u1(0)]------------|--[m]---------
--[u1(1)]------------|-[m]------ -> --[u1(1)]------------|---|-[m]------
--[u1(2)]-|----------|--|-[m]---- --[u1(2)]-|----------|---|--|-[m]----
----------|-[u1(03)]-|--|--|-[m]- ----------|-[u1(03)]-|---|--|--|-[m]-
| | | | | | | |
c:---------------------.--|--|--|- c:--------------------------.--|--|--|-
------------------------.--|--|- -----------------------------.--|--|-
---------------------------.--|- --------------------------------.--|-
------------------------------.- -----------------------------------.-
"""
qr = QuantumRegister(4)
cr = ClassicalRegister(4)
circuit = QuantumCircuit(qr, cr)
circuit.u1(0, qr[0])
circuit.u1(1, qr[1])
circuit.u1(2, qr[2])
circuit.barrier(qr[2], qr[3])
circuit.u1(3, qr[3])
test_circuit = circuit.copy()
test_circuit.measure(qr, cr)
# expected circuit is the same, just with a barrier before the measurements
expected = circuit.copy()
expected.barrier(qr)
expected.measure(qr, cr)
pass_ = BarrierBeforeFinalMeasurements()
result = pass_.run(circuit_to_dag(test_circuit))
self.assertEqual(result, circuit_to_dag(expected))
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
yongli3/rt-thread
|
bsp/stm32f411-nucleo/rtconfig.py
|
5
|
3761
|
# BSP Note: For TI EK-TM4C1294XL Tiva C Series Connected LaunchPad (REV D)
import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
#device options
PART_TYPE = 'STM32F411xE'
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'D:/ArdaArmTools/GNUARM_4.9_2015q1/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = 'C:/Program Files (x86)/IAR Systems/Embedded Workbench 7.2'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
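# Illustrative note (added; not part of the original BSP config): the cross toolchain can be
# chosen at build time through the environment variables read above, e.g. on a POSIX shell
#     RTT_CC=gcc RTT_EXEC_PATH=/opt/gcc-arm-none-eabi/bin scons
# which makes the logic above select PLATFORM='gcc' and override EXEC_PATH. The install
# path shown is only an example.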
BUILD = 'debug'
#BUILD = 'release'
if PLATFORM == 'gcc':
# tool-chains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=softfp -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -std=c99 -Dgcc' # -D' + PART_TYPE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-stm32.map,-cref,-u,Reset_Handler -T stm32_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M4.fp '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99' # -D' + PART_TYPE
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "stm32_rom.sct" --info sizes --info totals --info unused --info veneers --list rtthread-stm32.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/ARMCC/LIB'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/arm/armcc/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm' # + ' -D' + PART_TYPE
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M4'
CFLAGS += ' -e'
CFLAGS += ' --fpu=VFPv4_sp'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M4'
AFLAGS += ' --fpu VFPv4_sp'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "stm32_rom.icf"'
LFLAGS += ' --entry __iar_program_start'
#LFLAGS += ' --silent'
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = ''
|
apache-2.0
|
uwescience/myria-web
|
appengine/networkx/algorithms/distance_measures.py
|
3
|
3839
|
# -*- coding: utf-8 -*-
"""
Graph diameter, radius, eccentricity and other properties.
"""
__author__ = "\n".join(['Aric Hagberg ([email protected])',
'Dan Schult([email protected])'])
# Copyright (C) 2004-2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
__all__ = ['eccentricity', 'diameter', 'radius', 'periphery', 'center']
import networkx
def eccentricity(G, v=None, sp=None):
"""Return the eccentricity of nodes in G.
The eccentricity of a node v is the maximum distance from v to
all other nodes in G.
Parameters
----------
G : NetworkX graph
A graph
v : node, optional
Return value of specified node
sp : dict of dicts, optional
All pairs shortest path lengths as a dictionary of dictionaries
Returns
-------
ecc : dictionary
A dictionary of eccentricity values keyed by node.
"""
nodes=[]
if v is None: # none, use entire graph
nodes=G.nodes()
elif isinstance(v, list): # check for a list
nodes=v
else: # assume it is a single value
nodes=[v]
order=G.order()
e={}
for v in nodes:
if sp is None:
length=networkx.single_source_shortest_path_length(G,v)
else:
length=sp[v]
try:
L = len(length)
except TypeError:
raise networkx.NetworkXError('Format of "sp" is invalid.')
else:
if L != order:
msg = "Graph not connected: infinite path length"
raise networkx.NetworkXError(msg)
e[v]=max(length.values())
if len(e)==1:
return list(e.values())[0] # return single value
else:
return e
def diameter(G, e=None):
"""Return the diameter of the graph G.
The diameter is the maximum eccentricity.
Parameters
----------
G : NetworkX graph
A graph
e : eccentricity dictionary, optional
A precomputed dictionary of eccentricities.
Returns
-------
d : integer
Diameter of graph
See Also
--------
eccentricity
"""
if e is None:
e=eccentricity(G)
return max(e.values())
def periphery(G, e=None):
"""Return the periphery of the graph G.
The periphery is the set of nodes with eccentricity equal to the diameter.
Parameters
----------
G : NetworkX graph
A graph
e : eccentricity dictionary, optional
A precomputed dictionary of eccentricities.
Returns
-------
p : list
List of nodes in periphery
"""
if e is None:
e=eccentricity(G)
diameter=max(e.values())
p=[v for v in e if e[v]==diameter]
return p
def radius(G, e=None):
"""Return the radius of the graph G.
The radius is the minimum eccentricity.
Parameters
----------
G : NetworkX graph
A graph
e : eccentricity dictionary, optional
A precomputed dictionary of eccentricities.
Returns
-------
r : integer
Radius of graph
"""
if e is None:
e=eccentricity(G)
return min(e.values())
def center(G, e=None):
"""Return the center of the graph G.
The center is the set of nodes with eccentricity equal to radius.
Parameters
----------
G : NetworkX graph
A graph
e : eccentricity dictionary, optional
A precomputed dictionary of eccentricities.
Returns
-------
c : list
List of nodes in center
"""
if e is None:
e=eccentricity(G)
# find the set of nodes with eccentricity equal to the radius
radius=min(e.values())
p=[v for v in e if e[v]==radius]
return p
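# Minimal usage sketch (added; not part of the original module), assuming the standard
# networkx path_graph constructor is available. Expected values are given in the comments.
if __name__ == '__main__':
    G = networkx.path_graph(4)      # path 0-1-2-3
    print(eccentricity(G))          # {0: 3, 1: 2, 2: 2, 3: 3}
    print(diameter(G))              # 3 (maximum eccentricity)
    print(radius(G))                # 2 (minimum eccentricity)
    print(center(G))                # [1, 2]
    print(periphery(G))             # [0, 3]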
|
bsd-3-clause
|
cpalmit/drupaltest
|
sites/all/themes/bootstrap/node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py
|
531
|
41681
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cmake output module
This module is under development and should be considered experimental.
This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is
created for each configuration.
This module's original purpose was to support editing in IDEs like KDevelop
which use CMake for project management. It is also possible to use CMake to
generate projects for other IDEs such as eclipse cdt and code::blocks. QtCreator
will convert the CMakeLists.txt to a code::blocks cbp for the editor to read,
but build using CMake. As a result QtCreator editor is unaware of compiler
defines. The generated CMakeLists.txt can also be used to build on Linux. There
is currently no support for building on platforms other than Linux.
The generated CMakeLists.txt should properly compile all projects. However,
there is a mismatch between gyp and cmake with regard to linking. All attempts
are made to work around this, but CMake sometimes sees -Wl,--start-group as a
library and incorrectly repeats it. As a result the output of this generator
should not be relied on for building.
When using with kdevelop, use version 4.4+. Previous versions of kdevelop will
not be able to find the header file directories described in the generated
CMakeLists.txt file.
"""
import multiprocessing
import os
import signal
import string
import subprocess
import gyp.common
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
'SHARED_LIB_SUFFIX': '.so',
'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}',
'LIB_DIR': '${obj}.${TOOLSET}',
'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni',
'SHARED_INTERMEDIATE_DIR': '${obj}/gen',
'PRODUCT_DIR': '${builddir}',
'RULE_INPUT_PATH': '${RULE_INPUT_PATH}',
'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}',
'RULE_INPUT_NAME': '${RULE_INPUT_NAME}',
'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}',
'RULE_INPUT_EXT': '${RULE_INPUT_EXT}',
'CONFIGURATION_NAME': '${configuration}',
}
FULL_PATH_VARS = ('${CMAKE_SOURCE_DIR}', '${builddir}', '${obj}')
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = True
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 's', # cc
'.S': 's', # cc
}
def RemovePrefix(a, prefix):
"""Returns 'a' without 'prefix' if it starts with 'prefix'."""
return a[len(prefix):] if a.startswith(prefix) else a
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def NormjoinPathForceCMakeSource(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
If rel_path is an absolute path it is returned unchanged.
Otherwise it is resolved against base_path and normalized.
If the result is a relative path, it is forced to be relative to the
CMakeLists.txt.
"""
if os.path.isabs(rel_path):
return rel_path
if any([rel_path.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
# TODO: do we need to check base_path for absolute variables as well?
return os.path.join('${CMAKE_SOURCE_DIR}',
os.path.normpath(os.path.join(base_path, rel_path)))
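# Illustrative example (added comment; not part of the original generator): with a
# hypothetical base_path of 'third_party/lib' and rel_path of '../include', the call
# returns '${CMAKE_SOURCE_DIR}/third_party/include', while an absolute rel_path or one
# starting with a FULL_PATH_VARS entry is returned unchanged.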
def NormjoinPath(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
TODO: what is this really used for?
If rel_path begins with '$' it is returned unchanged.
Otherwise it is resolved against base_path if relative, then normalized.
"""
if rel_path.startswith('$') and not rel_path.startswith('${configuration}'):
return rel_path
return os.path.normpath(os.path.join(base_path, rel_path))
def CMakeStringEscape(a):
"""Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
The following are yet unknown
'$' generator variables (like ${obj}) must not be escaped,
but text $ should be escaped
what is wanted is to know which $ come from generator variables
"""
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
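# Illustrative check (added comment; not part of the original generator): for the hypothetical
# input 'a\b;c"d' the function returns 'a\\b\;c\"d', so CMake treats the backslash, semicolon
# and quote as literal characters rather than as an escape, a list separator or a string
# terminator.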
def SetFileProperty(output, source_name, property_name, values, sep):
"""Given a set of source file, sets the given property on them."""
output.write('set_source_files_properties(')
output.write(source_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetFilesProperty(output, source_names, property_name, values, sep):
"""Given a set of source files, sets the given property on them."""
output.write('set_source_files_properties(\n')
for source_name in source_names:
output.write(' ')
output.write(source_name)
output.write('\n')
output.write(' PROPERTIES\n ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('"\n)\n')
def SetTargetProperty(output, target_name, property_name, values, sep=''):
"""Given a target, sets the given property."""
output.write('set_target_properties(')
output.write(target_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetVariable(output, variable_name, value):
"""Sets a CMake variable."""
output.write('set(')
output.write(variable_name)
output.write(' "')
output.write(CMakeStringEscape(value))
output.write('")\n')
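# Illustrative example (added comment; not part of the original generator): a call such as
# SetVariable(output, 'TARGET', 'chrome') emits the CMake statement
#     set(TARGET "chrome")
# with the value passed through CMakeStringEscape above.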
def SetVariableList(output, variable_name, values):
"""Sets a CMake variable to a list."""
if not values:
return SetVariable(output, variable_name, "")
if len(values) == 1:
return SetVariable(output, variable_name, values[0])
output.write('list(APPEND ')
output.write(variable_name)
output.write('\n "')
output.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
output.write('")\n')
def UnsetVariable(output, variable_name):
"""Unsets a CMake variable."""
output.write('unset(')
output.write(variable_name)
output.write(')\n')
def WriteVariable(output, variable_name, prepend=None):
if prepend:
output.write(prepend)
output.write('${')
output.write(variable_name)
output.write('}')
class CMakeTargetType:
def __init__(self, command, modifier, property_modifier):
self.command = command
self.modifier = modifier
self.property_modifier = property_modifier
cmake_target_type_from_gyp_target_type = {
'executable': CMakeTargetType('add_executable', None, 'RUNTIME'),
'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'),
'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'),
'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'),
'none': CMakeTargetType('add_custom_target', 'SOURCES', None),
}
def StringToCMakeTargetName(a):
"""Converts the given string 'a' to a valid CMake target name.
All invalid characters are replaced by '_'.
Invalid for cmake: ' ', '/', '(', ')'
Invalid for make: ':'
Invalid for unknown reasons but causes failures: '.'
"""
return a.translate(string.maketrans(' /():.', '______'))
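# Illustrative example (added comment; not part of the original generator): every character
# listed in the translation table above is mapped to '_', so a hypothetical input such as
# 'foo/bar.gyp:baz target' becomes 'foo_bar_gyp_baz_target'.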
def WriteActions(target_name, actions, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'actions' in the target.
Args:
target_name: the name of the CMake target being generated.
actions: the Gyp 'actions' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for action in actions:
action_name = StringToCMakeTargetName(action['action_name'])
action_target_name = '%s__%s' % (target_name, action_name)
inputs = action['inputs']
inputs_name = action_target_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = action['outputs']
cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out)
for out in outputs]
outputs_name = action_target_name + '__output'
SetVariableList(output, outputs_name, cmake_outputs)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources.extend(zip(cmake_outputs, outputs))
# add_custom_command
output.write('add_custom_command(OUTPUT ')
WriteVariable(output, outputs_name)
output.write('\n')
if len(dirs) > 0:
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(action['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write('\n')
output.write(' WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in action:
output.write(action['message'])
else:
output.write(action_target_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(action_target_name)
output.write('\n DEPENDS ')
WriteVariable(output, outputs_name)
output.write('\n SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n)\n')
extra_deps.append(action_target_name)
def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source):
if rel_path.startswith(("${RULE_INPUT_PATH}","${RULE_INPUT_DIRNAME}")):
if any([rule_source.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
return NormjoinPathForceCMakeSource(base_path, rel_path)
def WriteRules(target_name, rules, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'rules' in the target.
Args:
target_name: the name of the CMake target being generated.
rules: the Gyp 'rules' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for rule in rules:
rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name'])
inputs = rule.get('inputs', [])
inputs_name = rule_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = rule['outputs']
var_outputs = []
for count, rule_source in enumerate(rule.get('rule_sources', [])):
action_name = rule_name + '_' + str(count)
rule_source_dirname, rule_source_basename = os.path.split(rule_source)
rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename)
SetVariable(output, 'RULE_INPUT_PATH', rule_source)
SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname)
SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename)
SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root)
SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
# Create variables for the output, as 'local' variable will be unset.
these_outputs = []
for output_index, out in enumerate(outputs):
output_name = action_name + '_' + str(output_index)
SetVariable(output, output_name,
NormjoinRulePathForceCMakeSource(path_to_gyp, out,
rule_source))
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.append(('${' + output_name + '}', out))
these_outputs.append('${' + output_name + '}')
var_outputs.append('${' + output_name + '}')
# add_custom_command
output.write('add_custom_command(OUTPUT\n')
for out in these_outputs:
output.write(' ')
output.write(out)
output.write('\n')
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(rule['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
# CMAKE_SOURCE_DIR is where the CMakeLists.txt lives.
# The cwd is the current build directory.
output.write(' WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in rule:
output.write(rule['message'])
else:
output.write(action_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
UnsetVariable(output, 'RULE_INPUT_PATH')
UnsetVariable(output, 'RULE_INPUT_DIRNAME')
UnsetVariable(output, 'RULE_INPUT_NAME')
UnsetVariable(output, 'RULE_INPUT_ROOT')
UnsetVariable(output, 'RULE_INPUT_EXT')
# add_custom_target
output.write('add_custom_target(')
output.write(rule_name)
output.write(' DEPENDS\n')
for out in var_outputs:
output.write(' ')
output.write(out)
output.write('\n')
output.write('SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n')
for rule_source in rule.get('rule_sources', []):
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
output.write(')\n')
extra_deps.append(rule_name)
def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output):
"""Write CMake for the 'copies' in the target.
Args:
target_name: the name of the CMake target being generated.
copies: the Gyp 'copies' dict for this target.
extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
copy_name = target_name + '__copies'
# CMake gets upset with custom targets with OUTPUT which specify no output.
have_copies = any(copy['files'] for copy in copies)
if not have_copies:
output.write('add_custom_target(')
output.write(copy_name)
output.write(')\n')
extra_deps.append(copy_name)
return
class Copy:
def __init__(self, ext, command):
self.cmake_inputs = []
self.cmake_outputs = []
self.gyp_inputs = []
self.gyp_outputs = []
self.ext = ext
self.inputs_name = None
self.outputs_name = None
self.command = command
file_copy = Copy('', 'copy')
dir_copy = Copy('_dirs', 'copy_directory')
for copy in copies:
files = copy['files']
destination = copy['destination']
for src in files:
path = os.path.normpath(src)
basename = os.path.split(path)[1]
dst = os.path.join(destination, basename)
copy = file_copy if os.path.basename(src) else dir_copy
copy.cmake_inputs.append(NormjoinPath(path_to_gyp, src))
copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst))
copy.gyp_inputs.append(src)
copy.gyp_outputs.append(dst)
for copy in (file_copy, dir_copy):
if copy.cmake_inputs:
copy.inputs_name = copy_name + '__input' + copy.ext
SetVariableList(output, copy.inputs_name, copy.cmake_inputs)
copy.outputs_name = copy_name + '__output' + copy.ext
SetVariableList(output, copy.outputs_name, copy.cmake_outputs)
# add_custom_command
output.write('add_custom_command(\n')
output.write('OUTPUT')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n')
for copy in (file_copy, dir_copy):
for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs):
# 'cmake -E copy src dst' will create the 'dst' directory if needed.
output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command)
output.write(src)
output.write(' ')
output.write(dst)
output.write("\n")
output.write('DEPENDS')
for copy in (file_copy, dir_copy):
if copy.inputs_name:
WriteVariable(output, copy.inputs_name, ' ')
output.write('\n')
output.write('WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write('COMMENT Copying for ')
output.write(target_name)
output.write('\n')
output.write('VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(copy_name)
output.write('\n DEPENDS')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n SOURCES')
if file_copy.inputs_name:
WriteVariable(output, file_copy.inputs_name, ' ')
output.write('\n)\n')
extra_deps.append(copy_name)
def CreateCMakeTargetBaseName(qualified_target):
"""This is the name we would like the target to have."""
_, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_base_name = gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_base_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_base_name)
def CreateCMakeTargetFullName(qualified_target):
"""An unambiguous name for the target."""
gyp_file, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_full_name = gyp_file + ':' + gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_full_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_full_name)
class CMakeNamer(object):
"""Converts Gyp target names into CMake target names.
CMake requires that target names be globally unique. One way to ensure
this is to fully qualify the names of the targets. Unfortunately, this
ends up with all targets looking like "chrome_chrome_gyp_chrome" instead
of just "chrome". If this generator were only interested in building, it
would be possible to fully qualify all target names, then create
unqualified target names which depend on all qualified targets which
should have had that name. This is more or less what the 'make' generator
does with aliases. However, one goal of this generator is to create CMake
files for use with IDEs, and fully qualified names are not as user
friendly.
Since target name collision is rare, we do the above only when required.
Toolset variants are always qualified from the base, as this is required for
building. However, it also makes sense for an IDE, as it is possible for
defines to be different.
"""
def __init__(self, target_list):
self.cmake_target_base_names_conficting = set()
cmake_target_base_names_seen = set()
for qualified_target in target_list:
cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target)
if cmake_target_base_name not in cmake_target_base_names_seen:
cmake_target_base_names_seen.add(cmake_target_base_name)
else:
self.cmake_target_base_names_conficting.add(cmake_target_base_name)
def CreateCMakeTargetName(self, qualified_target):
base_name = CreateCMakeTargetBaseName(qualified_target)
if base_name in self.cmake_target_base_names_conficting:
return CreateCMakeTargetFullName(qualified_target)
return base_name
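# Illustrative example (added comment; not part of the original generator): for a hypothetical
# qualified target 'chrome/chrome.gyp:chrome#target', CreateCMakeTargetBaseName yields 'chrome'
# while CreateCMakeTargetFullName yields 'chrome_chrome_gyp_chrome'; the full form is only used
# when two targets share the same base name, as described in the class docstring.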
def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output):
# The make generator does this always.
# TODO: It would be nice to be able to tell CMake all dependencies.
circular_libs = generator_flags.get('circular', True)
if not generator_flags.get('standalone', False):
output.write('\n#')
output.write(qualified_target)
output.write('\n')
gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir)
rel_gyp_dir = os.path.dirname(rel_gyp_file)
# Relative path from build dir to top dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir)
# Relative path from build dir to gyp dir.
build_to_gyp = os.path.join(build_to_top, rel_gyp_dir)
path_from_cmakelists_to_gyp = build_to_gyp
spec = target_dicts.get(qualified_target, {})
config = spec.get('configurations', {}).get(config_to_use, {})
target_name = spec.get('target_name', '<missing target name>')
target_type = spec.get('type', '<missing target type>')
target_toolset = spec.get('toolset')
SetVariable(output, 'TARGET', target_name)
SetVariable(output, 'TOOLSET', target_toolset)
cmake_target_name = namer.CreateCMakeTargetName(qualified_target)
extra_sources = []
extra_deps = []
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Rules must be early like actions.
if 'rules' in spec:
WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Copies
if 'copies' in spec:
WriteCopies(cmake_target_name, spec['copies'], extra_deps,
path_from_cmakelists_to_gyp, output)
# Target and sources
srcs = spec.get('sources', [])
# Gyp separates the sheep from the goats based on file extensions.
def partition(l, p):
return reduce(lambda x, e: x[not p(e)].append(e) or x, l, ([], []))
compilable_srcs, other_srcs = partition(srcs, Compilable)
# CMake gets upset when executable targets provide no sources.
if target_type == 'executable' and not compilable_srcs and not extra_sources:
print ('Executable %s has no compilable sources, treating as "none".' %
target_name )
target_type = 'none'
cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type)
if cmake_target_type is None:
print ('Target %s has unknown target type %s, skipping.' %
( target_name, target_type ) )
return
other_srcs_name = None
if other_srcs:
other_srcs_name = cmake_target_name + '__other_srcs'
SetVariableList(output, other_srcs_name,
[NormjoinPath(path_from_cmakelists_to_gyp, src) for src in other_srcs])
# CMake is opposed to setting linker directories and considers the practice
# of setting linker directories dangerous. Instead, it favors the use of
# find_library and passing absolute paths to target_link_libraries.
# However, CMake does provide the command link_directories, which adds
# link directories to targets defined after it is called.
# As a result, link_directories must come before the target definition.
# CMake unfortunately has no means of removing entries from LINK_DIRECTORIES.
library_dirs = config.get('library_dirs')
if library_dirs is not None:
output.write('link_directories(')
for library_dir in library_dirs:
output.write(' ')
output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir))
output.write('\n')
output.write(')\n')
output.write(cmake_target_type.command)
output.write('(')
output.write(cmake_target_name)
if cmake_target_type.modifier is not None:
output.write(' ')
output.write(cmake_target_type.modifier)
if other_srcs_name:
WriteVariable(output, other_srcs_name, ' ')
output.write('\n')
for src in compilable_srcs:
output.write(' ')
output.write(NormjoinPath(path_from_cmakelists_to_gyp, src))
output.write('\n')
for extra_source in extra_sources:
output.write(' ')
src, _ = extra_source
output.write(NormjoinPath(path_from_cmakelists_to_gyp, src))
output.write('\n')
output.write(')\n')
# Output name and location.
if target_type != 'none':
# Mark uncompiled sources as uncompiled.
if other_srcs_name:
output.write('set_source_files_properties(')
WriteVariable(output, other_srcs_name, '')
output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n')
# Output directory
target_output_directory = spec.get('product_dir')
if target_output_directory is None:
if target_type in ('executable', 'loadable_module'):
target_output_directory = generator_default_variables['PRODUCT_DIR']
elif target_type in ('shared_library',):
target_output_directory = '${builddir}/lib.${TOOLSET}'
elif spec.get('standalone_static_library', False):
target_output_directory = generator_default_variables['PRODUCT_DIR']
else:
base_path = gyp.common.RelativePath(os.path.dirname(gyp_file),
options.toplevel_dir)
target_output_directory = '${obj}.${TOOLSET}'
target_output_directory = (
os.path.join(target_output_directory, base_path))
cmake_target_output_directory = NormjoinPathForceCMakeSource(
path_from_cmakelists_to_gyp,
target_output_directory)
SetTargetProperty(output,
cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY',
cmake_target_output_directory)
# Output name
default_product_prefix = ''
default_product_name = target_name
default_product_ext = ''
if target_type == 'static_library':
static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
static_library_prefix)
default_product_prefix = static_library_prefix
default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX']
elif target_type in ('loadable_module', 'shared_library'):
shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
shared_library_prefix)
default_product_prefix = shared_library_prefix
default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX']
elif target_type != 'executable':
print ('ERROR: What output file should be generated?',
'type', target_type, 'target', target_name)
product_prefix = spec.get('product_prefix', default_product_prefix)
product_name = spec.get('product_name', default_product_name)
product_ext = spec.get('product_extension')
if product_ext:
product_ext = '.' + product_ext
else:
product_ext = default_product_ext
SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix)
SetTargetProperty(output, cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_NAME',
product_name)
SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext)
# Make the output of this target referenceable as a source.
cmake_target_output_basename = product_prefix + product_name + product_ext
cmake_target_output = os.path.join(cmake_target_output_directory,
cmake_target_output_basename)
SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '')
# Let CMake know if the 'all' target should depend on this target.
exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets
else 'FALSE')
SetTargetProperty(output, cmake_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
for extra_target_name in extra_deps:
SetTargetProperty(output, extra_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
# Includes
includes = config.get('include_dirs')
if includes:
# This (target include directories) is what requires CMake 2.8.8
includes_name = cmake_target_name + '__include_dirs'
SetVariableList(output, includes_name,
[NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include)
for include in includes])
output.write('set_property(TARGET ')
output.write(cmake_target_name)
output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ')
WriteVariable(output, includes_name, '')
output.write(')\n')
# Defines
defines = config.get('defines')
if defines is not None:
SetTargetProperty(output,
cmake_target_name,
'COMPILE_DEFINITIONS',
defines,
';')
# Compile Flags - http://www.cmake.org/Bug/view.php?id=6493
# CMake currently does not have target C and CXX flags.
# So, instead of doing...
# cflags_c = config.get('cflags_c')
# if cflags_c is not None:
# SetTargetProperty(output, cmake_target_name,
# 'C_COMPILE_FLAGS', cflags_c, ' ')
# cflags_cc = config.get('cflags_cc')
# if cflags_cc is not None:
# SetTargetProperty(output, cmake_target_name,
# 'CXX_COMPILE_FLAGS', cflags_cc, ' ')
# Instead we must...
s_sources = []
c_sources = []
cxx_sources = []
for src in srcs:
_, ext = os.path.splitext(src)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
if src_type == 's':
s_sources.append(NormjoinPath(path_from_cmakelists_to_gyp, src))
if src_type == 'cc':
c_sources.append(NormjoinPath(path_from_cmakelists_to_gyp, src))
if src_type == 'cxx':
cxx_sources.append(NormjoinPath(path_from_cmakelists_to_gyp, src))
for extra_source in extra_sources:
src, real_source = extra_source
_, ext = os.path.splitext(real_source)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
if src_type == 's':
s_sources.append(NormjoinPath(path_from_cmakelists_to_gyp, src))
if src_type == 'cc':
c_sources.append(NormjoinPath(path_from_cmakelists_to_gyp, src))
if src_type == 'cxx':
cxx_sources.append(NormjoinPath(path_from_cmakelists_to_gyp, src))
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cxx = config.get('cflags_cc', [])
if c_sources and not (s_sources or cxx_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
elif cxx_sources and not (s_sources or c_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
else:
if s_sources and cflags:
SetFilesProperty(output, s_sources, 'COMPILE_FLAGS', cflags, ' ')
if c_sources and (cflags or cflags_c):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetFilesProperty(output, c_sources, 'COMPILE_FLAGS', flags, ' ')
if cxx_sources and (cflags or cflags_cxx):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetFilesProperty(output, cxx_sources, 'COMPILE_FLAGS', flags, ' ')
# Have assembly link as c if there are no other files
if not c_sources and not cxx_sources and s_sources:
SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C'])
# Linker flags
ldflags = config.get('ldflags')
if ldflags is not None:
SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ')
# Note on Dependencies and Libraries:
# CMake wants to handle link order, resolving the link line up front.
# Gyp does not retain or enforce specifying enough information to do so.
# So do as other gyp generators and use --start-group and --end-group.
# Give CMake as little information as possible so that it doesn't mess it up.
# Dependencies
rawDeps = spec.get('dependencies', [])
static_deps = []
shared_deps = []
other_deps = []
for rawDep in rawDeps:
dep_cmake_name = namer.CreateCMakeTargetName(rawDep)
dep_spec = target_dicts.get(rawDep, {})
dep_target_type = dep_spec.get('type', None)
if dep_target_type == 'static_library':
static_deps.append(dep_cmake_name)
elif dep_target_type == 'shared_library':
shared_deps.append(dep_cmake_name)
else:
other_deps.append(dep_cmake_name)
  # Ensure all external dependencies are complete before internal dependencies
  # (extra_deps currently only depend on their own deps, so otherwise run early).
if static_deps or shared_deps or other_deps:
for extra_dep in extra_deps:
output.write('add_dependencies(')
output.write(extra_dep)
output.write('\n')
for deps in (static_deps, shared_deps, other_deps):
for dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(dep)
output.write('\n')
output.write(')\n')
linkable = target_type in ('executable', 'loadable_module', 'shared_library')
other_deps.extend(extra_deps)
if other_deps or (not linkable and (static_deps or shared_deps)):
output.write('add_dependencies(')
output.write(cmake_target_name)
output.write('\n')
for dep in gyp.common.uniquer(other_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if not linkable:
for deps in (static_deps, shared_deps):
for lib_dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(lib_dep)
output.write('\n')
output.write(')\n')
# Libraries
if linkable:
external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0]
if external_libs or static_deps or shared_deps:
output.write('target_link_libraries(')
output.write(cmake_target_name)
output.write('\n')
if static_deps:
write_group = circular_libs and len(static_deps) > 1
if write_group:
output.write('-Wl,--start-group\n')
for dep in gyp.common.uniquer(static_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if write_group:
output.write('-Wl,--end-group\n')
if shared_deps:
for dep in gyp.common.uniquer(shared_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if external_libs:
for lib in gyp.common.uniquer(external_libs):
output.write(' ')
output.write(lib)
output.write('\n')
output.write(')\n')
UnsetVariable(output, 'TOOLSET')
UnsetVariable(output, 'TARGET')
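# Illustration (comment only; the target and library names are hypothetical and
# this text is not emitted verbatim by the generator): for a linkable target
# with two static dependencies and one external library, and with
# circular_libs enabled, the block above writes roughly:
#
#   target_link_libraries(my_target
#   -Wl,--start-group
#    dep_a
#    dep_b
#   -Wl,--end-group
#    -lpthread
#   )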
def GenerateOutputForConfig(target_list, target_dicts, data,
params, config_to_use):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to cmake easier; cmake doesn't put anything here.
# Each Gyp configuration creates a different CMakeLists.txt file
# to avoid incompatibilities between Gyp and CMake configurations.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_to_use))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
output_file = os.path.join(toplevel_build, 'CMakeLists.txt')
gyp.common.EnsureDirExists(output_file)
output = open(output_file, 'w')
output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
output.write('cmake_policy(VERSION 2.8.8)\n')
_, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1])
output.write('project(')
output.write(project_target)
output.write(')\n')
SetVariable(output, 'configuration', config_to_use)
# The following appears to be as-yet undocumented.
# http://public.kitware.com/Bug/view.php?id=8392
output.write('enable_language(ASM)\n')
# ASM-ATT does not support .S files.
# output.write('enable_language(ASM-ATT)\n')
SetVariable(output, 'builddir', '${CMAKE_BINARY_DIR}')
SetVariable(output, 'obj', '${builddir}/obj')
output.write('\n')
# TODO: Undocumented/unsupported (the CMake Java generator depends on it).
# CMake by default names the object resulting from foo.c to be foo.c.o.
# Gyp traditionally names the object resulting from foo.c foo.o.
# This should be irrelevant, but some targets extract .o files from .a
# and depend on the name of the extracted .o files.
output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('\n')
namer = CMakeNamer(target_list)
# The list of targets upon which the 'all' target should depend.
  # CMake has its own implicit 'all' target; one is not created explicitly.
all_qualified_targets = set()
for build_file in params['build_files']:
for qualified_target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_qualified_targets.add(qualified_target)
for qualified_target in target_list:
WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output)
output.close()
def PerformBuild(data, configurations, params):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to cmake easier; cmake doesn't put anything here.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
for config_name in configurations:
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_name))
arguments = ['cmake', '-G', 'Ninja']
print 'Generating [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments, cwd=build_dir)
arguments = ['ninja', '-C', build_dir]
print 'Building [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
target_list, target_dicts, data, params, config_name = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data,
params, user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append((target_list, target_dicts, data,
params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data,
params, config_name)
|
gpl-2.0
|
GdZ/scriptfile
|
software/googleAppEngine/lib/django_1_4/django/core/handlers/wsgi.py
|
83
|
8644
|
import sys
from threading import Lock
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django import http
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_unicode, iri_to_uri
from django.utils.log import getLogger
logger = getLogger('django.request')
# See http://www.iana.org/assignments/http-status-codes
STATUS_CODE_TEXT = {
100: 'CONTINUE',
101: 'SWITCHING PROTOCOLS',
102: 'PROCESSING',
200: 'OK',
201: 'CREATED',
202: 'ACCEPTED',
203: 'NON-AUTHORITATIVE INFORMATION',
204: 'NO CONTENT',
205: 'RESET CONTENT',
206: 'PARTIAL CONTENT',
207: 'MULTI-STATUS',
208: 'ALREADY REPORTED',
226: 'IM USED',
300: 'MULTIPLE CHOICES',
301: 'MOVED PERMANENTLY',
302: 'FOUND',
303: 'SEE OTHER',
304: 'NOT MODIFIED',
305: 'USE PROXY',
306: 'RESERVED',
307: 'TEMPORARY REDIRECT',
400: 'BAD REQUEST',
401: 'UNAUTHORIZED',
402: 'PAYMENT REQUIRED',
403: 'FORBIDDEN',
404: 'NOT FOUND',
405: 'METHOD NOT ALLOWED',
406: 'NOT ACCEPTABLE',
407: 'PROXY AUTHENTICATION REQUIRED',
408: 'REQUEST TIMEOUT',
409: 'CONFLICT',
410: 'GONE',
411: 'LENGTH REQUIRED',
412: 'PRECONDITION FAILED',
413: 'REQUEST ENTITY TOO LARGE',
414: 'REQUEST-URI TOO LONG',
415: 'UNSUPPORTED MEDIA TYPE',
416: 'REQUESTED RANGE NOT SATISFIABLE',
417: 'EXPECTATION FAILED',
422: 'UNPROCESSABLE ENTITY',
423: 'LOCKED',
424: 'FAILED DEPENDENCY',
426: 'UPGRADE REQUIRED',
500: 'INTERNAL SERVER ERROR',
501: 'NOT IMPLEMENTED',
502: 'BAD GATEWAY',
503: 'SERVICE UNAVAILABLE',
504: 'GATEWAY TIMEOUT',
505: 'HTTP VERSION NOT SUPPORTED',
506: 'VARIANT ALSO NEGOTIATES',
507: 'INSUFFICIENT STORAGE',
508: 'LOOP DETECTED',
510: 'NOT EXTENDED',
}
class LimitedStream(object):
'''
    LimitedStream wraps another stream so that it is impossible to read more
    than a specified number of bytes from it.
'''
def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
self.stream = stream
self.remaining = limit
self.buffer = ''
self.buf_size = buf_size
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return ''
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = ''
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = ''
return result
def readline(self, size=None):
while '\n' not in self.buffer and \
(size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = StringIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
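# A minimal, hypothetical sketch (not part of the original module) showing how
# LimitedStream caps reads at the given limit. It relies only on the StringIO
# import above; the function name is illustrative.
def _limited_stream_sketch():
    stream = LimitedStream(StringIO('hello world'), 5)
    assert stream.read() == 'hello'        # only the first 5 bytes are visible
    assert stream.read() == ''             # the limit is exhausted
    capped = LimitedStream(StringIO('spam\neggs'), 7)
    assert capped.readline() == 'spam\n'   # readline stops at the newline
    assert capped.readline() == 'eg'       # and never reads past the limit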
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = base.get_script_name(environ)
path_info = force_unicode(environ.get('PATH_INFO', u'/'))
if not path_info or path_info == script_name:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
#
# (The comparison of path_info to script_name is to work around an
# apparent bug in flup 1.0.1. See Django ticket #8490).
path_info = u'/'
self.environ = environ
self.path_info = path_info
self.path = '%s%s' % (script_name, path_info)
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
self._post_parse_error = False
try:
content_length = int(self.environ.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
self._read_started = False
def get_full_path(self):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s' % (self.path, self.environ.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.environ.get('QUERY_STRING', ''))) or '')
def _is_secure(self):
return 'wsgi.url_scheme' in self.environ and self.environ['wsgi.url_scheme'] == 'https'
def _get_request(self):
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
def _get_get(self):
if not hasattr(self, '_get'):
# The WSGI spec says 'QUERY_STRING' may be absent.
self._get = http.QueryDict(self.environ.get('QUERY_STRING', ''), encoding=self._encoding)
return self._get
def _set_get(self, get):
self._get = get
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
def _get_cookies(self):
if not hasattr(self, '_cookies'):
self._cookies = http.parse_cookie(self.environ.get('HTTP_COOKIE', ''))
return self._cookies
def _set_cookies(self, cookies):
self._cookies = cookies
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
GET = property(_get_get, _set_get)
POST = property(_get_post, _set_post)
COOKIES = property(_get_cookies, _set_cookies)
FILES = property(_get_files)
REQUEST = property(_get_request)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.initLock.acquire()
try:
try:
# Check that middleware is still uninitialised.
if self._request_middleware is None:
self.load_middleware()
except:
# Unload whatever middleware we got
self._request_middleware = None
raise
finally:
self.initLock.release()
set_script_prefix(base.get_script_name(environ))
signals.request_started.send(sender=self.__class__)
try:
try:
request = self.request_class(environ)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={
'status_code': 400,
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
finally:
signals.request_finished.send(sender=self.__class__)
try:
status_text = STATUS_CODE_TEXT[response.status_code]
except KeyError:
status_text = 'UNKNOWN STATUS CODE'
status = '%s %s' % (response.status_code, status_text)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append(('Set-Cookie', str(c.output(header=''))))
start_response(status, response_headers)
return response
|
mit
|
unsiloai/syntaxnet-ops-hack
|
tensorflow/contrib/seq2seq/python/ops/basic_decoder.py
|
31
|
5521
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class of Decoders that may sample to generate the next input.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.util import nest
__all__ = [
"BasicDecoderOutput",
"BasicDecoder",
]
class BasicDecoderOutput(
collections.namedtuple("BasicDecoderOutput", ("rnn_output", "sample_id"))):
pass
class BasicDecoder(decoder.Decoder):
"""Basic sampling decoder."""
def __init__(self, cell, helper, initial_state, output_layer=None):
"""Initialize BasicDecoder.
Args:
cell: An `RNNCell` instance.
helper: A `Helper` instance.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
The initial state of the RNNCell.
      output_layer: (Optional) An instance of `tf.layers.Layer`, e.g.,
`tf.layers.Dense`. Optional layer to apply to the RNN output prior
to storing the result or sampling.
Raises:
TypeError: if `cell`, `helper` or `output_layer` have an incorrect type.
"""
if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access
raise TypeError("cell must be an RNNCell, received: %s" % type(cell))
if not isinstance(helper, helper_py.Helper):
raise TypeError("helper must be a Helper, received: %s" % type(helper))
if (output_layer is not None
and not isinstance(output_layer, layers_base.Layer)):
raise TypeError(
"output_layer must be a Layer, received: %s" % type(output_layer))
self._cell = cell
self._helper = helper
self._initial_state = initial_state
self._output_layer = output_layer
@property
def batch_size(self):
return self._helper.batch_size
def _rnn_output_size(self):
size = self._cell.output_size
if self._output_layer is None:
return size
else:
# To use layer's compute_output_shape, we need to convert the
# RNNCell's output_size entries into shapes with an unknown
# batch size. We then pass this through the layer's
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s),
size)
layer_output_shape = self._output_layer._compute_output_shape( # pylint: disable=protected-access
output_shape_with_unknown_batch)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def output_size(self):
# Return the cell output and the id
return BasicDecoderOutput(
rnn_output=self._rnn_output_size(),
sample_id=tensor_shape.TensorShape([]))
@property
def output_dtype(self):
    # Assume the cell output has the same dtype as the first component of the
    # initial state, and return that dtype for every entry of the
    # output_size structure, plus int32 for the sample ids.
dtype = nest.flatten(self._initial_state)[0].dtype
return BasicDecoderOutput(
nest.map_structure(lambda _: dtype, self._rnn_output_size()),
dtypes.int32)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
Returns:
`(finished, first_inputs, initial_state)`.
"""
return self._helper.initialize() + (self._initial_state,)
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
with ops.name_scope(name, "BasicDecoderStep", (time, inputs, state)):
cell_outputs, cell_state = self._cell(inputs, state)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
sample_ids = self._helper.sample(
time=time, outputs=cell_outputs, state=cell_state)
(finished, next_inputs, next_state) = self._helper.next_inputs(
time=time,
outputs=cell_outputs,
state=cell_state,
sample_ids=sample_ids)
outputs = BasicDecoderOutput(cell_outputs, sample_ids)
return (outputs, next_state, next_inputs, finished)
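# A hedged usage sketch (not part of the original module). It assumes the
# TF 1.x contrib.seq2seq API: an `RNNCell` instance `cell`, embedded decoder
# inputs of shape [batch, time, dim] with matching `sequence_length`, and an
# encoder `initial_state`. The function name is illustrative only.
def _basic_decoder_usage_sketch(cell, decoder_inputs, sequence_length,
                                initial_state):
  # TrainingHelper feeds the ground-truth inputs back in at every step.
  helper = helper_py.TrainingHelper(decoder_inputs, sequence_length)
  basic_decoder = BasicDecoder(cell=cell, helper=helper,
                               initial_state=initial_state)
  # dynamic_decode repeatedly calls BasicDecoder.step() until `finished`.
  final_outputs, final_state, final_lengths = decoder.dynamic_decode(
      basic_decoder)
  return final_outputs.rnn_output, final_outputs.sample_id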
|
apache-2.0
|
rockfruit/bika.lims
|
bika/lims/controlpanel/bika_containertypes.py
|
1
|
3518
|
# This file is part of Bika LIMS
#
# Copyright 2011-2016 by its authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
from AccessControl.SecurityInfo import ClassSecurityInfo
from Products.ATContentTypes.content import schemata
from Products.Archetypes import atapi
from Products.Archetypes.ArchetypeTool import registerType
from Products.CMFCore.utils import getToolByName
from bika.lims.browser.bika_listing import BikaListingView
from bika.lims.config import PROJECTNAME
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.content.bikaschema import BikaFolderSchema
from bika.lims.interfaces import IContainerTypes
from plone.app.layout.globals.interfaces import IViewView
from plone.app.content.browser.interfaces import IFolderContentsView
from plone.app.folder.folder import ATFolder, ATFolderSchema
from zope.interface.declarations import implements
from operator import itemgetter
class ContainerTypesView(BikaListingView):
implements(IFolderContentsView, IViewView)
def __init__(self, context, request):
super(ContainerTypesView, self).__init__(context, request)
self.catalog = 'bika_setup_catalog'
self.contentFilter = {'portal_type': 'ContainerType',
'sort_on': 'sortable_title'}
self.context_actions = {_('Add'):
{'url': 'createObject?type_name=ContainerType',
'icon': '++resource++bika.lims.images/add.png'}}
self.title = self.context.translate(_("Container Types"))
self.icon = self.portal_url + "/++resource++bika.lims.images/container_big.png"
self.description = ""
self.show_sort_column = False
self.show_select_row = False
self.show_select_column = True
self.pagesize = 25
self.columns = {
'Title': {'title': _('Container Type'),
'index':'sortable_title'},
'Description': {'title': _('Description'),
'index': 'description',
'toggle': True},
}
self.review_states = [
{'id':'default',
'title': _('Active'),
'contentFilter': {'inactive_state': 'active'},
'transitions': [{'id':'deactivate'}, ],
'columns': ['Title',
'Description']},
{'id':'inactive',
'title': _('Dormant'),
'contentFilter': {'inactive_state': 'inactive'},
'transitions': [{'id':'activate'}, ],
'columns': ['Title',
'Description']},
{'id':'all',
'title': _('All'),
'contentFilter':{},
'columns': ['Title',
'Description']},
]
def folderitems(self):
items = BikaListingView.folderitems(self)
for x in range(len(items)):
if not items[x].has_key('obj'): continue
obj = items[x]['obj']
items[x]['Description'] = obj.Description()
items[x]['replace']['Title'] = "<a href='%s'>%s</a>" % \
(items[x]['url'], items[x]['Title'])
return items
schema = ATFolderSchema.copy()
class ContainerTypes(ATFolder):
implements(IContainerTypes)
displayContentsTab = False
schema = schema
schemata.finalizeATCTSchema(schema, folderish = True, moveDiscussion = False)
atapi.registerType(ContainerTypes, PROJECTNAME)
|
agpl-3.0
|
morrillo/partner_id_validation
|
__openerp__.py
|
2
|
1489
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'partner_id_validation',
'version': '0.1',
'category': 'General',
'description': "Module that validates that sale orders, purchase orders and invoices are assigned a partner that is a company.",
'author': 'Moldeo Interactive',
'website': 'http://business.moldeo.coop/',
'images': [],
'depends': ['sale','account','purchase'],
'demo': [],
'data': [],
'test': [],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
gpl-2.0
|
theoryno3/scikit-learn
|
sklearn/feature_extraction/image.py
|
32
|
17167
|
"""
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
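# Worked example (illustrative comment, not part of the original module):
# for a 2 x 2 grid (n_x=2, n_y=2, n_z=1) the voxels are numbered
#     0 1
#     2 3
# and _make_edges_3d(2, 2) returns
#     array([[0, 2, 0, 1],
#            [1, 3, 2, 3]])
# i.e. the "right" neighbours (0-1, 2-3) followed by the "down" neighbours
# (0-2, 1-3); there are no "deep" edges because n_z == 1.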
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Parameters
===========
img: ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as: np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype: None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
=====
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
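# A small, hypothetical usage sketch (not part of the original module) of
# img_to_graph on a toy 2 x 2 image; the function name is illustrative only.
def _img_to_graph_sketch():
    img = np.array([[0., 1.],
                    [2., 3.]])
    graph = img_to_graph(img)  # sparse COO matrix of shape (4, 4)
    dense = np.asarray(graph.todense())
    # Off-diagonal entries hold the absolute gradients between 4-connected
    # pixels (e.g. |0 - 1| = 1 across the top row, |0 - 2| = 2 down the first
    # column); diagonal entries hold the pixel values themselves.
    return dense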
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
===========
n_x: int
Dimension in x axis
n_y: int
Dimension in y axis
n_z: int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as: np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype: dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
=====
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Parameters
===========
i_h: int
The image height
i_w: int
        The image width
p_h: int
The height of a patch
p_w: int
The width of a patch
max_patches: integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Parameters
----------
arr: ndarray
n-dimensional array of which patches are to be extracted
patch_shape: integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step: integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches: strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
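# A brief, hypothetical sketch (not part of the original module) of the
# strided view produced by extract_patches; the function name is illustrative.
def _extract_patches_sketch():
    arr = np.arange(16).reshape(4, 4)
    patches = extract_patches(arr, patch_shape=2, extraction_step=1)
    # 3 x 3 patch positions, each a 2 x 2 window sharing memory with `arr`.
    assert patches.shape == (3, 3, 2, 2)
    assert (patches[0, 0] == np.array([[0, 1], [4, 5]])).all()
    return patches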
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Parameters
----------
image: array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
        the channel: an RGB image would have `n_channels=3`.
patch_size: tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches: integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state: int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Parameters
----------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size: tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image: array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
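# A short, hypothetical round-trip sketch (not part of the original module):
# extracting every overlapping patch and averaging them back reproduces the
# original image exactly; the function name is illustrative only.
def _reconstruct_sketch():
    image = np.arange(16, dtype=np.float64).reshape(4, 4)
    patches = extract_patches_2d(image, (2, 2))       # shape (9, 2, 2)
    rebuilt = reconstruct_from_patches_2d(patches, (4, 4))
    assert np.allclose(rebuilt, image)
    return rebuilt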
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Parameters
----------
patch_size: tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches: integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: an RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
|
bsd-3-clause
|
deonwu/robotframework-debuger
|
src/rdb/interface/web/standalone.py
|
1
|
8730
|
"""
this module is support to run RDB web interface in standalone mode. the RDB is running
with robot, it's not available after the robot is stopped. The user may be confused
"what's happened RDB? Does crushed?"
The RDB will switch over to stand alone mode to notify user the robot is stopped friendly.
"""
from wsgiref.simple_server import make_server, demo_app
from wsgiref.simple_server import WSGIRequestHandler
import re, sys, os, logging, urllib2, traceback, socket
import autoreload
from wsgi_proxy import WSGIProxyApplication
class HttpServletApp(object):
def __init__(self, environ, start_response):
""" URL? """
self.environ = environ
self.output = []
self.logger = logging.getLogger("http")
actions = ManageActions()
url = environ['PATH_INFO']
command = url.split("/")[2]
if command and hasattr(actions, command):
action = getattr(actions, command)
else:
action = actions.status
result = ()
self.params = self.__parse_param(environ['QUERY_STRING'])
if action.func_defaults:
reqiured_args_count = action.func_code.co_argcount - len(action.func_defaults)
else:
reqiured_args_count = action.func_code.co_argcount
var_names = action.func_code.co_varnames
reqiured_args, options_args = var_names[:reqiured_args_count], var_names[reqiured_args_count:]
try:
args, kw_args = self.__parse_args(self.params, reqiured_args, options_args)
result = action(*args, **kw_args)
except Exception, e:
self.logger.exception(e)
result = "Exception:%s\n%s" % (e, traceback.format_exc())
self.render_output(start_response, result)
def render_output(self, start_response, result):
import types
if self.params.get("DEBUG", '') == 'Y':
env_list = ( "%s=%s\n" % (k, v) for k, v in self.environ.iteritems() )
self.output.append("<!--%s-->" % "".join(env_list))
if isinstance(result, basestring):
self.output.append(result)
start_response("200 OK", [('Content-Type','text/plain'), ])
elif isinstance(result, types.TupleType):
template_name, param = result[:2]
import templates as t
from robot.serializing import Template, Namespace
from robot.running import NAMESPACES
template = getattr(t, template_name)
self.output.append(Template(template=template).generate(param))
start_response("200 OK", [('Content-Type','text/html'), ])
else:
self.output.append(str(result))
start_response("200 OK", [('Content-Type','text/plain'), ])
def __parse_param(self, param):
p = {}
for e in param.split("&"):
if "=" not in e: continue
k, v = e.split("=", 1)
p[k] = urllib2.unquote(v)
return p
def __parse_args(self, args, reqiured_args, options_args):
param = []
for name in reqiured_args:
if 'self' == name:continue
if not args.has_key(name):
                raise RuntimeError, "Required parameter '%s' not found" % name
param.append(args[name])
kw_param = {}
for name in options_args:
if args.has_key(name):
kw_param[str(name)] = args[name]
return (param, kw_param)
def __iter__(self):
return iter(self.output)
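# Illustration (comment only, with hypothetical URLs): HttpServletApp maps the
# second path segment to a ManageActions method and the query string to its
# arguments, so for example:
#
#   /manage/start_rdb?host=127.0.0.1&port=8000 -> ManageActions.start_rdb('127.0.0.1', '8000')
#   /manage/status                             -> ManageActions.status()
#   /manage/<unknown command>                  -> falls back to ManageActions.status()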
class ManageActions(object):
def start_rdb(self, host, port):
rdb = SERVER_CONTEXT.rdb
rdb.server_name = host
rdb.server_port = port
rdb.status = 'running'
return 'OK'
def done_rdb(self):
rdb = SERVER_CONTEXT.rdb
rdb.status = 'closed'
return 'OK'
def stop_proxy(self):
sys.exit(0)
def proxy_status(self):
return self.status()
def proxy_help(self):
pass
def status(self):
return ["root_path:%s" % SERVER_CONTEXT.root_path,
"rdb_port:%s" % SERVER_CONTEXT.rdb.server_port,
"rdb_host:%s" % SERVER_CONTEXT.rdb.server_name,
"rdb_status:%s" % SERVER_CONTEXT.rdb.status,
]
class StaticWebApp(object):
def __init__(self, environ, start_response):
""" URL? """
self.http_url = environ['PATH_INFO']
start_response("404 Not Found", [('Content-Type','text/plain')])
self.env = environ
def __iter__(self):
return iter([])
class ApplicationContext(object):
"""A global standalone object, it's keep a web server running context."""
def __init__(self, root_path='', app_setting=None):
self.root_path = root_path
self.app_setting = app_setting
self.active_rdb = RDBInfo()
self.proxy_exception = None
@property
def rdb(self): return self.active_rdb
class RDBInfo(object):
"""A RDB interface infomation."""
STATUS = ['running', 'closed', ]
def __init__(self, host='127.0.0.1', port=0):
self.server_name = host
self.server_port = port
self.status = 'closed'
self.info = []
self.start_time = []
def wsgi_global_app(environ, start_response):
#proxy = context.rdb_proxy()
path_info = environ['PATH_INFO']
script = path_info.split("/")[1]
logger = logging.getLogger("rdb.proxy")
if re.search(r"\.(?:html|css|js|jpg|gif|png|ico)$", path_info, re.I):
return StaticWebApp(environ, start_response)
elif script in ['manage', ]:
return HttpServletApp(environ, start_response)
elif script in ['alive', ]:
start_response("200 OK", [('Content-Type','text/plain')])
return ['OK', ]
elif SERVER_CONTEXT.rdb.status == 'running':
socket.setdefaulttimeout(5)
rdb = SERVER_CONTEXT.rdb
environ['HTTP_HOST'] = "%s:%s" % (rdb.server_name, rdb.server_port)
environ['SERVER_NAME'] = rdb.server_name
environ['SERVER_PORT'] = rdb.server_port
proxy = WSGIProxyApplication()
try:
logger.info("HTTP_HOST:%s" % environ['HTTP_HOST'])
logger.info("url:%s" % path_info)
return proxy(environ, start_response)
except BaseException, e:
start_response("302 Found", [('Location','/manage/status')])
SERVER_CONTEXT.rdb.status = 'error'
logger.exception(e)
return []
else:
#status_code = 302
start_response("302 Found", [('Location','/manage/status')])
return []
class RDBProxyWSGIHandler(WSGIRequestHandler):
def log_message(self, format, *args):
logging.getLogger("rdb.proxy").debug(format % args)
def main(config_file='', ):
import logging
def init_sys_logging(cfg):
level = getattr(logging, cfg.LOGGING_LEVEL)
logging.basicConfig(level=level,
format='%(asctime)s %(name)-8s %(levelname)-6s %(message)s',
datefmt='%m-%d %H:%M:%S',
filename=cfg.PROXY_LOGGING_FILE,
filemode='a')
def start_wsgi_server():
from rdb.RobotDebuger import DebugSetting
app_settings = DebugSetting()
work_root = os.getcwd()
config_path = os.path.abspath(config_file)
if os.path.isfile(config_path):
app_settings.load_from_file(config_path)
init_sys_logging(app_settings)
logger = logging.getLogger("rdb.proxy")
logger.info("Loading RDB proxy at %s" % work_root)
try:
SERVER_CONTEXT = ApplicationContext(work_root, app_settings)
globals()['SERVER_CONTEXT'] = SERVER_CONTEXT
from wsgiref.simple_server import WSGIServer
server_address = (app_settings.WEB_BIND, int(app_settings.WEB_PORT))
server = WSGIServer(server_address, RDBProxyWSGIHandler)
server.set_app(wsgi_global_app)
SERVER_CONTEXT.server = server
logger.info("Serving HTTP on %s:%s..." %(app_settings.WEB_BIND,
app_settings.WEB_PORT))
server.serve_forever()
except BaseException, e:
logger.exception(e)
start_wsgi_server()
#autoreload.main(start_wsgi_server)
if __name__ == "__main__":
xx = os.path.dirname(__file__)
sys.path.insert(0, os.path.normpath(os.path.join(xx, "..", "..", "..")))
main(*sys.argv[1:])
|
gpl-2.0
|
codemonkey2841/tradebot
|
run.py
|
1
|
6871
|
#!/usr/bin/env python
""" It's a TradeBot """
import configparser
import curses
from httplib import HTTPException
import os
import signal
import socket
from ssl import SSLError
import sys
import time
from tradebot import TradeBot
def on_exit(sig, func=None):
curses.nocbreak()
stdscr.keypad(0)
curses.echo()
curses.endwin()
curses.curs_set(1)
sys.exit()
def initialize():
# Initialize curses screen
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.keypad(1)
curses.curs_set(0)
# Initialize screen
stdscr.addstr(0, 0, "=" * 82)
stdscr.addstr(1, 36, "BTC-E BOT")
stdscr.addstr(2, 0, "-" * 81)
stdscr.addstr(4, 0, "=" * 81)
stdscr.addstr(21, 0, " " * 70)
stdscr.addstr(22, 0, "=" * 82)
for i in range(1, 22):
stdscr.addstr(i, 0, "|")
stdscr.addstr(i, 81, "|")
# Initialize top bar labels
stdscr.addstr(3, 2, "SIM", curses.A_BOLD)
stdscr.addstr(3, 12, "VERBOSE", curses.A_BOLD)
stdscr.addstr(3, 24, "WAIT", curses.A_BOLD)
stdscr.addstr(3, 35, "PAIR", curses.A_BOLD)
stdscr.addstr(3, 50, "THRESH", curses.A_BOLD)
stdscr.addstr(3, 65, "TRADE", curses.A_BOLD)
stdscr.addstr(3, 6, "[ ]")
stdscr.addstr(3, 20, "[ ]")
stdscr.addstr(3, 29, "[ ]")
stdscr.addstr(3, 40, "[ ]")
stdscr.addstr(3, 57, "[ ]")
stdscr.addstr(3, 71, "[ ]")
# Initialize main area labels
stdscr.addstr(5, 2, "BALANCE:", curses.A_BOLD)
stdscr.addstr(5, 67, "PRICE HISTORY", curses.A_UNDERLINE)
stdscr.addstr(6, 57, "CURRENT ->")
stdscr.addstr(7, 2, "STATE:", curses.A_BOLD)
stdscr.addstr(8, 2, "TRADE INCREMENT:", curses.A_BOLD)
stdscr.addstr(9, 2, "TRADE THRESHOLD:", curses.A_BOLD)
stdscr.addstr(10, 2, "AVERAGE PRICE:", curses.A_BOLD)
stdscr.addstr(19, 2, "ORDER LIST ( )", curses.A_UNDERLINE)
stdscr.addstr(12, 2, "TRADE HISTORY", curses.A_UNDERLINE)
stdscr.refresh()
return stdscr
def update(stdscr):
curr1 = tradebot.curr[0].upper()
curr2 = tradebot.curr[1].upper()
stdscr.addstr(1, 56, "%s" % time.asctime())
(state, thresh) = tradebot.get_state()
if state == "buy":
thresh = "< %0.06f" % thresh
elif state == "sell":
thresh = "> %0.06f" % thresh
elif state == "build":
thresh = "%0.06f" % thresh
stdscr.addstr(9, 20, "%s %s" % (thresh, curr2))
stdscr.addstr(10, 20, "%f %s" % (tradebot.average_price(), curr2))
stdscr.addstr(5, 12, "%f %s / %f %s"
% (tradebot.get_balance(1), curr1, tradebot.get_balance(2),curr2))
stdscr.addstr(7, 20, "%s " % state.upper())
stdscr.addstr(8, 20, "%f %s" % (tradebot.get_trade_cost(), curr1))
# Top Bar values
sim = "OFF"
if tradebot.simulation:
sim = "ON"
stdscr.addstr(3, 7, "%3s" % sim)
stdscr.addstr(3, 21, "%s" % args['verbosity'][:1])
stdscr.addstr(3, 30, "%3d" % tradebot.wait)
stdscr.addstr(3, 41, "%s_%s" % (tradebot.curr[0], tradebot.curr[1]))
stdscr.addstr(3, 58, "%.02f%%" % (tradebot.trade_threshold * 100))
stdscr.addstr(3, 72, "%6.02f%%" % (tradebot.trade_increment * 100))
# Price History
line = 6
history = tradebot.get_price_history()
for item in history:
stdscr.addstr(line, 68, "%f %s" % (item, curr2))
line += 1
if line > 21:
break
# Completed trades
history = tradebot.get_trade_history()
line = 13
for item in history:
stdscr.addstr(line, 2, "%s: %s %f @ %.05f %s " % (item.timestamp,
item.type,
item.amount,
item.rate,
curr2))
line += 1
# Order list
orders = tradebot.get_orders()
stdscr.addstr(19, 14, "%2d" % len(orders))
line = 20
stdscr.addstr(20, 2, " " * 40)
stdscr.addstr(21, 2, " " * 40)
for order in orders:
stdscr.addstr(line, 2, "%s %f @ %.05f %s" % (order.type,
order.amount,
order.rate,
curr2))
line += 1
if line > 21:
break
stdscr.refresh()
signal.signal(signal.SIGQUIT, on_exit)
signal.signal(signal.SIGTERM, on_exit)
signal.signal(signal.SIGINT, on_exit)
errlog = 'error.log'
config = configparser.ConfigParser()
config.read('tradebot.conf')
args = {}
if 'api_file' in config['BTC-E']:
args['api_file'] = str(config['BTC-E']['api_file'])
else:
sys.stderr.write('api_file not defined')
sys.exit(1)
with open(args['api_file']) as f:
args['api_key'] = f.readline().strip()
if 'increment' in config['TRADE']:
args['trade_increment'] = float(config['TRADE']['increment'])
else:
args['trade_increment'] = 0.012
if 'threshold' in config['TRADE']:
args['trade_threshold'] = float(config['TRADE']['threshold'])
else:
args['trade_threshold'] = 0.006
if 'pair' in config['BTC-E']:
args['pair'] = str(config['BTC-E']['pair'])
else:
args['pair'] = 'ltc_btc'
if 'wait' in config['TRADE']:
args['wait'] = int(config['TRADE']['refresh'])
else:
args['wait'] = 15
if 'simulation' in config['MAIN']:
args['simulation'] = str(config['MAIN']['simulation'])
else:
args['simulation'] = 'off'
if 'verbosity' in config['MAIN']:
args['verbosity'] = config['MAIN']['verbosity'].upper()
else:
args['verbosity'] = "ERROR"
if 'logfile' in config['MAIN']:
args['logfile'] = str(config['MAIN']['logfile'])
else:
args['logfile'] = 'tradebot.log'
if 'db' in config['MAIN']:
args['db'] = str(config['MAIN']['db'])
else:
args['db'] = 'tradebot.db'
sys.stderr = open(errlog, "w")
tradebot = TradeBot(args)
stdscr = initialize()
while True:
try:
stdscr.addstr(21, 2, " " * 70)
tradebot.refresh_price()
for i in range(tradebot.wait):
update(stdscr)
time.sleep(1)
except (SSLError, HTTPException, ValueError, socket.error):
curses.start_color()
curses.init_pair(1, curses.COLOR_RED, curses.COLOR_WHITE)
        for i in range(60, 0, -1):
            stdscr.addstr(21, 2, "Failed to connect to exchange. Retrying " \
                          "in %d" % i, curses.color_pair(1))
            stdscr.refresh()
            time.sleep(1)
except Exception as e:
curses.nocbreak()
#stdscr.keypad(0)
curses.echo()
curses.endwin()
curses.curs_set(1)
import traceback
type_, value_, traceback_ = sys.exc_info()
for line in traceback.format_tb(traceback_):
sys.stderr.write(line)
sys.stderr.write(e.__class__.__name__ + ": ")
sys.stderr.write(e.message)
sys.exit()
|
mit
|
upliftaero/MissionPlanner
|
Lib/lib2to3/fixes/fix_filter.py
|
61
|
2183
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes filter(F, X) into list(filter(F, X)).
We avoid the transformation if the filter() call is directly contained
in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
for V in <>:.
NOTE: This is still not correct if the original code was depending on
filter(F, X) to return a string if X is a string and a tuple if X is a
tuple. That would require type inference, which we don't do. Let
Python 2.6 figure it out.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
class FixFilter(fixer_base.ConditionalFix):
BM_compatible = True
PATTERN = """
filter_lambda=power<
'filter'
trailer<
'('
arglist<
lambdef< 'lambda'
(fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
>
','
it=any
>
')'
>
>
|
power<
'filter'
trailer< '(' arglist< none='None' ',' seq=any > ')' >
>
|
power<
'filter'
args=trailer< '(' [any] ')' >
>
"""
skip_on = "future_builtins.filter"
def transform(self, node, results):
if self.should_skip(node):
return
if "filter_lambda" in results:
new = ListComp(results.get("fp").clone(),
results.get("fp").clone(),
results.get("it").clone(),
results.get("xp").clone())
elif "none" in results:
new = ListComp(Name(u"_f"),
Name(u"_f"),
results["seq"].clone(),
Name(u"_f"))
else:
if in_special_context(node):
return None
new = node.clone()
new.prefix = u""
new = Call(Name(u"list"), [new])
new.prefix = node.prefix
return new
|
gpl-3.0
|
nkgilley/home-assistant
|
tests/components/blebox/test_sensor.py
|
13
|
2739
|
"""Blebox sensors tests."""
import logging
import blebox_uniapi
import pytest
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_TEMPERATURE,
STATE_UNKNOWN,
TEMP_CELSIUS,
)
from .conftest import async_setup_entity, mock_feature
from tests.async_mock import AsyncMock, PropertyMock
@pytest.fixture(name="tempsensor")
def tempsensor_fixture():
"""Return a default sensor mock."""
feature = mock_feature(
"sensors",
blebox_uniapi.sensor.Temperature,
unique_id="BleBox-tempSensor-1afe34db9437-0.temperature",
full_name="tempSensor-0.temperature",
device_class="temperature",
unit="celsius",
current=None,
)
product = feature.product
type(product).name = PropertyMock(return_value="My temperature sensor")
type(product).model = PropertyMock(return_value="tempSensor")
return (feature, "sensor.tempsensor_0_temperature")
async def test_init(tempsensor, hass, config):
"""Test sensor default state."""
_, entity_id = tempsensor
entry = await async_setup_entity(hass, config, entity_id)
assert entry.unique_id == "BleBox-tempSensor-1afe34db9437-0.temperature"
state = hass.states.get(entity_id)
assert state.name == "tempSensor-0.temperature"
assert state.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_TEMPERATURE
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == TEMP_CELSIUS
assert state.state == STATE_UNKNOWN
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert device.name == "My temperature sensor"
assert device.identifiers == {("blebox", "abcd0123ef5678")}
assert device.manufacturer == "BleBox"
assert device.model == "tempSensor"
assert device.sw_version == "1.23"
async def test_update(tempsensor, hass, config):
"""Test sensor update."""
feature_mock, entity_id = tempsensor
def initial_update():
feature_mock.current = 25.18
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
state = hass.states.get(entity_id)
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == TEMP_CELSIUS
assert state.state == "25.18"
async def test_update_failure(tempsensor, hass, config, caplog):
"""Test that update failures are logged."""
caplog.set_level(logging.ERROR)
feature_mock, entity_id = tempsensor
feature_mock.async_update = AsyncMock(side_effect=blebox_uniapi.error.ClientError)
await async_setup_entity(hass, config, entity_id)
assert f"Updating '{feature_mock.full_name}' failed: " in caplog.text
|
apache-2.0
|
alqfahad/odoo
|
addons/procurement_jit_stock/procurement_jit_stock.py
|
130
|
2001
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2013 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class procurement_order(osv.osv):
_inherit = "procurement.order"
def run(self, cr, uid, ids, autocommit=False, context=None):
context = dict(context or {}, procurement_autorun_defer=True)
res = super(procurement_order, self).run(cr, uid, ids, autocommit=autocommit, context=context)
procurement_ids = self.search(cr, uid, [('move_dest_id.procurement_id', 'in', ids)], order='id', context=context)
if procurement_ids:
return self.run(cr, uid, procurement_ids, autocommit=autocommit, context=context)
return res
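# Clarifying note: the recursive run() above walks the chain just-in-time by
# re-running every procurement whose destination move belongs to one of the
# procurements just processed, instead of leaving them to the scheduler.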
class stock_move(osv.osv):
_inherit = "stock.move"
def _create_procurements(self, cr, uid, moves, context=None):
res = super(stock_move, self)._create_procurements(cr, uid, moves, context=dict(context or {}, procurement_autorun_defer=True))
self.pool['procurement.order'].run(cr, uid, res, context=context)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
lizardsystem/lizard-levee
|
lizard_levee/migrations/0011_auto__add_segment__add_field_area_segments.py
|
1
|
7042
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Segment'
db.create_table('lizard_levee_segment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('poly', self.gf('django.contrib.gis.db.models.fields.PolygonField')()),
))
db.send_create_signal('lizard_levee', ['Segment'])
# Adding field 'Area.segments'
db.add_column('lizard_levee_area', 'segments', self.gf('jsonfield.fields.JSONField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting model 'Segment'
db.delete_table('lizard_levee_segment')
# Deleting field 'Area.segments'
db.delete_column('lizard_levee_area', 'segments')
models = {
'lizard_levee.area': {
'Meta': {'object_name': 'Area'},
'cross_section_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'information_pointers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_levee.InformationPointer']", 'null': 'True', 'blank': 'True'}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_levee.Link']", 'null': 'True', 'blank': 'True'}),
'longitudinal_cross_section_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'segments': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'wms_layers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_wms.WMSSource']", 'null': 'True', 'blank': 'True'})
},
'lizard_levee.informationpointer': {
'Meta': {'object_name': 'InformationPointer'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'more_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'lizard_levee.link': {
'Meta': {'object_name': 'Link'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'lizard_levee.segment': {
'Meta': {'object_name': 'Segment'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'poly': ('django.contrib.gis.db.models.fields.PolygonField', [], {})
},
'lizard_maptree.category': {
'Meta': {'ordering': "('name',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_maptree.Category']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'})
},
'lizard_wms.wmsconnection': {
'Meta': {'object_name': 'WMSConnection'},
'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_maptree.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'options': ('django.db.models.fields.TextField', [], {'default': '\'{"buffer": 0, "reproject": true, "isBaseLayer": false, "opacity": 0.5}\''}),
'params': ('django.db.models.fields.TextField', [], {'default': '\'{"height": "256", "width": "256", "layers": "%s", "styles": "", "format": "image/png", "tiled": "true", "transparent": "true"}\''}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'1.3.0'", 'max_length': '20'}),
'xml': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'lizard_wms.wmssource': {
'Meta': {'ordering': "('name',)", 'object_name': 'WMSSource'},
'bbox': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_maptree.Category']", 'null': 'True', 'blank': 'True'}),
'connection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_wms.WMSConnection']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legend_url': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'options': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['lizard_levee']
|
gpl-3.0
|
40223227/40223227
|
static/Brython3.1.1-20150328-091302/Lib/VFS_import.py
|
738
|
3059
|
import os
import sys
from browser import doc
#_scripts=doc.createElement('script')
#_scripts.src="/src/py_VFS.js"
#_scripts.type="text/javascript"
#doc.get(tag='head')[0].appendChild(_scripts)
VFS=dict(JSObject(__BRYTHON__.py_VFS))
class VFSModuleFinder:
def __init__(self, path_entry):
print("in VFSModuleFinder")
if path_entry.startswith('/libs') or path_entry.startswith('/Lib'):
self.path_entry=path_entry
else:
raise ImportError()
def __str__(self):
return '<%s for "%s">' % (self.__class__.__name__, self.path_entry)
def find_module(self, fullname, path=None):
path = path or self.path_entry
#print('looking for "%s" in %s ...' % (fullname, path))
for _ext in ['js', 'pyj', 'py']:
_filepath=os.path.join(self.path_entry, '%s.%s' % (fullname, _ext))
if _filepath in VFS:
print("module found at %s:%s" % (_filepath, fullname))
return VFSModuleLoader(_filepath, fullname)
print('module %s not found' % fullname)
raise ImportError()
return None
class VFSModuleLoader:
"""Load source for modules"""
def __init__(self, filepath, name):
self._filepath=filepath
self._name=name
def get_source(self):
if self._filepath in VFS:
return JSObject(readFromVFS(self._filepath))
        raise ImportError('could not find source for %s' % self._name)
def is_package(self):
return '.' in self._name
def load_module(self):
if self._name in sys.modules:
#print('reusing existing module from previous import of "%s"' % fullname)
mod = sys.modules[self._name]
return mod
_src=self.get_source()
if self._filepath.endswith('.js'):
mod=JSObject(import_js_module(_src, self._filepath, self._name))
elif self._filepath.endswith('.py'):
mod=JSObject(import_py_module(_src, self._filepath, self._name))
elif self._filepath.endswith('.pyj'):
mod=JSObject(import_pyj_module(_src, self._filepath, self._name))
else:
raise ImportError('Invalid Module: %s' % self._filepath)
# Set a few properties required by PEP 302
mod.__file__ = self._filepath
mod.__name__ = self._name
mod.__path__ = os.path.abspath(self._filepath)
mod.__loader__ = self
mod.__package__ = '.'.join(self._name.split('.')[:-1])
if self.is_package():
print('adding path for package')
# Set __path__ for packages
# so we can find the sub-modules.
            mod.__path__ = [os.path.dirname(self._filepath)]
else:
print('imported as regular module')
print('creating a new module object for "%s"' % self._name)
sys.modules.setdefault(self._name, mod)
JSObject(__BRYTHON__.imported)[self._name]=mod
return mod
JSObject(__BRYTHON__.path_hooks.insert(0, VFSModuleFinder))
|
gpl-3.0
|
mread/buck
|
src/com/facebook/buck/command/intellij.py
|
2
|
19662
|
import errno
import fnmatch
import json
import os
import re
import subprocess
import sys
from collections import defaultdict
MODULE_XML_START = """<?xml version="1.0" encoding="UTF-8"?>
<module type="%(type)s" version="4">"""
MODULE_XML_END = """
</module>
"""
ANDROID_FACET = """
<component name="FacetManager">
<facet type="android" name="Android">
<configuration>
<option name="GEN_FOLDER_RELATIVE_PATH_APT" value="%(module_gen_path)s" />
<option name="GEN_FOLDER_RELATIVE_PATH_AIDL" value="%(module_gen_path)s" />
<option name="MANIFEST_FILE_RELATIVE_PATH" value="%(android_manifest)s" />
<option name="RES_FOLDER_RELATIVE_PATH" value="%(res)s" />
<option name="ASSETS_FOLDER_RELATIVE_PATH" value="/assets" />
<option name="LIBS_FOLDER_RELATIVE_PATH" value="%(libs_path)s" />
<option name="USE_CUSTOM_APK_RESOURCE_FOLDER" value="false" />
<option name="CUSTOM_APK_RESOURCE_FOLDER" value="" />
<option name="USE_CUSTOM_COMPILER_MANIFEST" value="false" />
<option name="CUSTOM_COMPILER_MANIFEST" value="" />
<option name="APK_PATH" value="" />
<option name="LIBRARY_PROJECT" value="%(is_android_library_project)s" />
<option name="RUN_PROCESS_RESOURCES_MAVEN_TASK" value="true" />
<option name="GENERATE_UNSIGNED_APK" value="false" />
<option name="CUSTOM_DEBUG_KEYSTORE_PATH" value="%(keystore)s" />
<option name="PACK_TEST_CODE" value="false" />
<option name="RUN_PROGUARD" value="%(run_proguard)s" />
<option name="PROGUARD_CFG_PATH" value="%(proguard_config)s" />
<resOverlayFolders />
<includeSystemProguardFile>false</includeSystemProguardFile>
<includeAssetsFromLibraries>true</includeAssetsFromLibraries>
<additionalNativeLibs />
</configuration>
</facet>
</component>"""
ALL_MODULES_XML_START = """<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>"""
ALL_MODULES_XML_END = """
</modules>
</component>
</project>
"""
LIBRARY_XML_START = """<component name="libraryTable">
<library name="%(name)s">
<CLASSES>
<root url="jar://$PROJECT_DIR$/%(binary_jar)s!/" />
</CLASSES>"""
LIBRARY_XML_WITH_JAVADOC = """
<JAVADOC>
<root url="%(javadoc_url)s" />
</JAVADOC>"""
LIBRARY_XML_NO_JAVADOC = """
<JAVADOC />"""
LIBRARY_XML_WITH_SOURCES = """
<SOURCES>
<root url="jar://$PROJECT_DIR$/%(source_jar)s!/" />
</SOURCES>"""
LIBRARY_XML_NO_SOURCES = """
<SOURCES />"""
LIBRARY_XML_END = """
</library>
</component>
"""
RUN_CONFIG_XML_START = """<component name="ProjectRunConfigurationManager">"""
RUN_CONFIG_XML_END = "</component>"
REMOTE_RUN_CONFIG_XML = """
<configuration default="false" name="%(name)s" type="Remote" factoryName="Remote">
<option name="USE_SOCKET_TRANSPORT" value="true" />
<option name="SERVER_MODE" value="false" />
<option name="SHMEM_ADDRESS" value="javadebug" />
<option name="HOST" value="localhost" />
<option name="PORT" value="5005" />
<RunnerSettings RunnerId="Debug">
<option name="DEBUG_PORT" value="5005" />
<option name="TRANSPORT" value="0" />
<option name="LOCAL" value="false" />
</RunnerSettings>
<ConfigurationWrapper RunnerId="Debug" />
<method />
</configuration>
"""
# Files that were written by this script.
# If `buck project` is working properly, most of the time it will be a no-op
# and no files will need to be written.
MODIFIED_FILES = []
# Files that are part of the project being run. We will delete all .iml files
# that are not checked in and not in this set.
PROJECT_FILES = set()
# Marker for a directory in the module tree that contains an .iml file.
# Intentionally chosen to be an illegal file name in both unix and windows.
CONTAINS_IML_MARKER = '/*contains_iml*/'
def tree():
""" Create an autovivification tree """
return defaultdict(tree)
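# For example (hypothetical keys): t = tree(); t['a']['b']['c'] = 1 creates the
# intermediate dicts automatically, which is how module_tree is filled in below.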
def create_additional_excludes(modules):
"""Create set of directories to also be excluded."""
# Tree representation of all modules.
module_tree = tree()
additional_excludes = defaultdict(list)
for module in modules:
normalized_iml = os.path.dirname(os.path.normpath(
module['pathToImlFile']))
# Add this path to our build tree
current_directory = module_tree
if normalized_iml:
for part in normalized_iml.split(os.path.sep):
current_directory = current_directory[part]
current_directory[CONTAINS_IML_MARKER] = module
for root, dirs, _files in os.walk('.', topdown=True, followlinks=True):
current_directory = module_tree
normalized_root = os.path.normpath(root)
if normalized_root == '.':
continue
highest_iml_file = None
for part in normalized_root.split(os.path.sep):
if CONTAINS_IML_MARKER in current_directory:
module = current_directory[CONTAINS_IML_MARKER]
found_relevant_source_folder = False
for source_folder in module['sourceFolders']:
# If we find a module that specifies the directory as the
# source folder, then keep all folders under that module.
#
# TODO(royw): Be smarter here and actually keep track of
# the additional directories being tracked by sub modules.
if source_folder['url'] != 'file://$MODULE_DIR$/gen':
found_relevant_source_folder = True
break
# If we found a module containing subdirectories as
# sourceFolders, bail on trying to find a higher IML file.
if found_relevant_source_folder:
break
highest_iml_file = module['pathToImlFile']
if part not in current_directory:
if part != 'res' and highest_iml_file:
additional_excludes[highest_iml_file].append(
normalized_root)
dirs[:] = []
break
else:
current_directory = current_directory[part]
return additional_excludes
def write_modules(modules, generate_minimum_project):
"""Writes one XML file for each module."""
additional_excludes = defaultdict(list)
if generate_minimum_project:
additional_excludes = create_additional_excludes(modules)
for module in modules:
# Build up the XML.
module_type = 'JAVA_MODULE'
if 'isIntelliJPlugin' in module and module['isIntelliJPlugin']:
module_type = 'PLUGIN_MODULE'
xml = MODULE_XML_START % {
'type': module_type,
}
# Android facet, if appropriate.
if module.get('hasAndroidFacet') is True:
if 'keystorePath' in module:
keystore = 'file://$MODULE_DIR$/%s' % module['keystorePath']
else:
keystore = ''
if 'androidManifest' in module:
android_manifest = module['androidManifest']
else:
android_manifest = '/AndroidManifest.xml'
is_library_project = module['isAndroidLibraryProject']
android_params = {
'android_manifest': android_manifest,
'res': '/res',
'is_android_library_project': str(is_library_project).lower(),
'run_proguard': 'false',
'module_gen_path': module['moduleGenPath'],
'proguard_config': '/proguard.cfg',
'keystore': keystore,
'libs_path': '/%s' % module.get('nativeLibs', 'libs'),
}
xml += ANDROID_FACET % android_params
# Source code and libraries component.
xml += '\n <component name="NewModuleRootManager" inherit-compiler-output="true">'
# Empirically, if there are multiple source folders, then the
# <content> element for the buck-out/android/gen folder should be
# listed before the other source folders.
num_source_folders = len(module['sourceFolders'])
if num_source_folders > 1:
xml = add_buck_android_source_folder(xml, module)
# Source folders.
xml += '\n <content url="file://$MODULE_DIR$">'
for source_folder in module['sourceFolders']:
if 'packagePrefix' in source_folder:
package_prefix = 'packagePrefix="%s" ' % source_folder['packagePrefix']
else:
package_prefix = ''
xml += '\n <sourceFolder url="%(url)s" isTestSource="%(is_test_source)s" %(package_prefix)s/>' % {
'url': source_folder['url'],
'is_test_source': str(source_folder['isTestSource']).lower(),
'package_prefix': package_prefix
}
for exclude_folder in module['excludeFolders']:
xml += '\n <excludeFolder url="%s" />' % exclude_folder['url']
for exclude_folder in sorted(additional_excludes[module['pathToImlFile']]):
normalized_dir = os.path.dirname(os.path.normpath(
module['pathToImlFile']))
xml += '\n <excludeFolder url="file://$MODULE_DIR$/%s" />' % os.path.relpath(exclude_folder, normalized_dir)
xml += '\n </content>'
xml = add_annotation_generated_source_folder(xml, module)
# Empirically, if there is one source folder, then the <content>
# element for the buck-out/android/gen folder should be listed after
# the other source folders.
if num_source_folders <= 1:
xml = add_buck_android_source_folder(xml, module)
# Dependencies.
dependencies = module['dependencies']
module_name = module['name']
# We need to filter out some of the modules in the dependency list:
# (1) The module may list itself as a dependency with scope="TEST",
# which is bad.
# (2) The module may list another module as a dependency with both
# COMPILE and TEST scopes, in which case the COMPILE scope should
# win.
# compile_dependencies will be the set of names of dependent modules
# that do not have scope="TEST"
compile_dependencies = filter(
lambda dep: dep['type'] == 'module' and
((not ('scope' in dep)) or dep['scope'] != 'TEST'),
dependencies)
compile_dependencies = map(
lambda dep: dep['moduleName'], compile_dependencies)
compile_dependencies = set(compile_dependencies)
# Filter dependencies to satisfy (1) and (2) defined above.
filtered_dependencies = []
for dep in dependencies:
if dep['type'] != 'module':
# Non-module dependencies should still be included.
filtered_dependencies.append(dep)
else:
# dep must be a module
dep_module_name = dep['moduleName']
if dep_module_name == module_name:
# Exclude self-references!
continue
elif 'scope' in dep and dep['scope'] == 'TEST':
# If this is a scope="TEST" module and the module is going
# to be included as a scope="COMPILE" module, then exclude
# it.
if not (dep_module_name in compile_dependencies):
filtered_dependencies.append(dep)
else:
# Non-test modules should still be included.
filtered_dependencies.append(dep)
# Now that we have filtered the dependencies, we can convert the
# remaining ones directly into XML.
excluded_deps_names = set()
if module_type == 'PLUGIN_MODULE':
# all the jars below are parts of IntelliJ SDK and even though they
# are required for language plugins to work standalone, they cannot
# be included as the plugin module dependency because they would
# clash with IntelliJ
excluded_deps_names = set([
'annotations', # org/intellij/lang/annotations, org/jetbrains/annotations
'extensions', # com/intellij/openapi/extensions/
'idea', # org/intellij, com/intellij
'jdom', # org/jdom
'junit', # junit/
'light_psi_all', # light psi library
'openapi', # com/intellij/openapi
'picocontainer', # org/picocontainer
'trove4j', # gnu/trove
'util', # com/intellij/util
])
for dep in filtered_dependencies:
if 'scope' in dep:
dep_scope = 'scope="%s" ' % dep['scope']
else:
dep_scope = ''
dep_type = dep['type']
if dep_type == 'library':
if dep['name'] in excluded_deps_names:
continue
xml += '\n <orderEntry type="library" exported="" %sname="%s" level="project" />' % (dep_scope, dep['name'])
elif dep_type == 'module':
dep_module_name = dep['moduleName']
# TODO(mbolin): Eliminate this special-case for jackson. It
# exists because jackson is not an ordinary module: it is a
# module that functions as a library. Project.java should add
# it as such in project.json to eliminate this special case.
if dep_module_name == 'module_first_party_orca_third_party_jackson':
exported = 'exported="" '
else:
exported = ''
xml += '\n <orderEntry type="module" module-name="%s" %s%s/>' % (dep_module_name, exported, dep_scope)
elif dep_type == 'inheritedJdk':
xml += '\n <orderEntry type="inheritedJdk" />'
elif dep_type == 'jdk':
xml += '\n <orderEntry type="jdk" jdkName="%s" jdkType="%s" />' % (dep['jdkName'], dep['jdkType'])
elif dep_type == 'sourceFolder':
xml += '\n <orderEntry type="sourceFolder" forTests="false" />'
# Close source code and libraries component.
xml += '\n </component>'
# Close XML.
xml += MODULE_XML_END
# Write the module to a file.
write_file_if_changed(module['pathToImlFile'], xml)
def add_buck_android_source_folder(xml, module):
# Apparently if we write R.java and friends to a gen/ directory under
# buck-out/android/ then IntelliJ wants that to be included as a separate
# source root.
if 'moduleGenPath' in module:
xml += '\n <content url="file://$MODULE_DIR$%s">' % module['moduleGenPath']
xml += '\n <sourceFolder url="file://$MODULE_DIR$%s" isTestSource="false" />' % module['moduleGenPath']
xml += '\n </content>'
return xml
def add_annotation_generated_source_folder(xml, module):
if 'annotationGenPath' in module:
annotation_gen_is_for_test = ('annotationGenIsForTest' in module and
module['annotationGenIsForTest'])
is_test_source = str(annotation_gen_is_for_test).lower()
xml += '\n <content url="file://$MODULE_DIR$%s">' % module['annotationGenPath']
xml += '\n <sourceFolder url="file://$MODULE_DIR$%s" isTestSource="%s" />' % (module['annotationGenPath'], is_test_source)
xml += '\n </content>'
return xml
def write_all_modules(modules):
"""Writes a modules.xml file that defines all of the modules in the project."""
# Build up the XML.
xml = ALL_MODULES_XML_START
# Alpha-sort modules by path before writing them out.
# This ensures that the ordering within modules.xml is stable.
modules.sort(key=lambda module: module['pathToImlFile'])
for module in modules:
relative_path = module['pathToImlFile']
xml += '\n <module fileurl="file://$PROJECT_DIR$/%s" filepath="$PROJECT_DIR$/%s" %s/>' % (
relative_path,
relative_path,
'group="modules"' if not module['isRootModule'] else '')
xml += ALL_MODULES_XML_END
# Write the modules to a file.
write_file_if_changed('.idea/modules.xml', xml)
def write_libraries(libraries):
"""Writes an XML file to define each library."""
mkdir_p('.idea/libraries')
for library in libraries:
# Build up the XML.
name = library['name']
xml = LIBRARY_XML_START % {
'name': name,
'binary_jar': library['binaryJar'],
}
if 'javadocUrl' in library:
xml += LIBRARY_XML_WITH_JAVADOC % {
'javadoc_url': library['javadocUrl'],
}
else:
xml += LIBRARY_XML_NO_JAVADOC
if 'sourceJar' in library:
xml += LIBRARY_XML_WITH_SOURCES % {
'source_jar': library['sourceJar'],
}
else:
xml += LIBRARY_XML_NO_SOURCES
xml += LIBRARY_XML_END
# Write the library to a file
write_file_if_changed('.idea/libraries/%s.xml' % name, xml)
def write_run_configs():
"""Writes the run configurations that should be available"""
mkdir_p('.idea/runConfigurations')
xml = RUN_CONFIG_XML_START
xml += REMOTE_RUN_CONFIG_XML % {'name': "Debug Buck test"}
xml += RUN_CONFIG_XML_END
write_file_if_changed('.idea/runConfigurations/Debug_Buck_test.xml', xml)
def write_file_if_changed(path, content):
PROJECT_FILES.add(path)
if os.path.exists(path):
file_content_as_string = open(path, 'r').read()
needs_update = content.strip() != file_content_as_string.strip()
else:
needs_update = True
if needs_update:
out = open(path, 'wb')
out.write(content)
MODIFIED_FILES.append(path)
def mkdir_p(path):
"""Runs the equivalent of `mkdir -p`
Taken from http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
Args:
path: an absolute path
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def clean_old_files():
if os.path.isdir('.git'):
try:
files_to_clean = subprocess.check_output([
'git',
'ls-files',
'--other'])
for file_name in files_to_clean.splitlines():
if (file_name.endswith('.iml') and
file_name not in PROJECT_FILES):
os.remove(file_name)
return
except Exception as e:
pass
if __name__ == '__main__':
json_file = sys.argv[1]
generate_minimum_project = False
if len(sys.argv) == 3:
generate_minimum_project = sys.argv[2] == '--generate_minimum_project'
parsed_json = json.load(open(json_file, 'r'))
libraries = parsed_json['libraries']
write_libraries(libraries)
modules = parsed_json['modules']
write_modules(modules, generate_minimum_project)
write_all_modules(modules)
write_run_configs()
# Write the list of modified files to stdout
for path in MODIFIED_FILES:
print path
print >> sys.stderr, ' :: Please close and re-open IntelliJ.'
|
apache-2.0
|
megaumi/django
|
django/db/migrations/loader.py
|
165
|
16926
|
from __future__ import unicode_literals
import os
import sys
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder
from django.utils import six
from .exceptions import AmbiguityError, BadMigrationError, NodeNotFoundError
MIGRATIONS_MODULE_NAME = 'migrations'
class MigrationLoader(object):
"""
Loads migration files from disk, and their status from the database.
Migration files are expected to live in the "migrations" directory of
an app. Their names are entirely unimportant from a code perspective,
but will probably follow the 1234_name.py convention.
On initialization, this class will scan those directories, and open and
read the python files, looking for a class called Migration, which should
inherit from django.db.migrations.Migration. See
django.db.migrations.migration for what that looks like.
Some migrations will be marked as "replacing" another set of migrations.
These are loaded into a separate set of migrations away from the main ones.
If all the migrations they replace are either unapplied or missing from
disk, then they are injected into the main set, replacing the named migrations.
Any dependency pointers to the replaced migrations are re-pointed to the
new migration.
This does mean that this class MUST also talk to the database as well as
to disk, but this is probably fine. We're already not just operating
in memory.
"""
def __init__(self, connection, load=True, ignore_no_migrations=False):
self.connection = connection
self.disk_migrations = None
self.applied_migrations = None
self.ignore_no_migrations = ignore_no_migrations
if load:
self.build_graph()
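# Illustrative sketch of the "replacing" mechanism described in the class
# docstring (hypothetical app and migration names):
#     class Migration(migrations.Migration):
#         replaces = [('shop', '0001_initial'), ('shop', '0002_add_sku')]
# build_graph() swaps such a squashed migration into the graph only when the
# migrations it replaces are either all applied or all unapplied, and any
# dependencies that pointed at the replaced migrations are re-pointed to it.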
@classmethod
def migrations_module(cls, app_label):
if app_label in settings.MIGRATION_MODULES:
return settings.MIGRATION_MODULES[app_label]
else:
app_package_name = apps.get_app_config(app_label).name
return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME)
def load_disk(self):
"""
Loads the migrations from all INSTALLED_APPS from disk.
"""
self.disk_migrations = {}
self.unmigrated_apps = set()
self.migrated_apps = set()
for app_config in apps.get_app_configs():
# Get the migrations module directory
module_name = self.migrations_module(app_config.label)
was_loaded = module_name in sys.modules
try:
module = import_module(module_name)
except ImportError as e:
# I hate doing this, but I don't want to squash other import errors.
# Might be better to try a directory check directly.
if "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e):
self.unmigrated_apps.add(app_config.label)
continue
raise
else:
# PY3 will happily import empty dirs as namespaces.
if not hasattr(module, '__file__'):
self.unmigrated_apps.add(app_config.label)
continue
# Module is not a package (e.g. migrations.py).
if not hasattr(module, '__path__'):
self.unmigrated_apps.add(app_config.label)
continue
# Force a reload if it's already loaded (tests need this)
if was_loaded:
six.moves.reload_module(module)
self.migrated_apps.add(app_config.label)
directory = os.path.dirname(module.__file__)
# Scan for .py files
migration_names = set()
for name in os.listdir(directory):
if name.endswith(".py"):
import_name = name.rsplit(".", 1)[0]
if import_name[0] not in "_.~":
migration_names.add(import_name)
# Load them
south_style_migrations = False
for migration_name in migration_names:
try:
migration_module = import_module("%s.%s" % (module_name, migration_name))
except ImportError as e:
# Ignore South import errors, as we're triggering them
if "south" in str(e).lower():
south_style_migrations = True
break
raise
if not hasattr(migration_module, "Migration"):
raise BadMigrationError(
"Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
)
# Ignore South-style migrations
if hasattr(migration_module.Migration, "forwards"):
south_style_migrations = True
break
self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(migration_name, app_config.label)
if south_style_migrations:
self.unmigrated_apps.add(app_config.label)
def get_migration(self, app_label, name_prefix):
"Gets the migration exactly named, or raises `graph.NodeNotFoundError`"
return self.graph.nodes[app_label, name_prefix]
def get_migration_by_prefix(self, app_label, name_prefix):
"Returns the migration(s) which match the given app label and name _prefix_"
# Do the search
results = []
for l, n in self.disk_migrations:
if l == app_label and n.startswith(name_prefix):
results.append((l, n))
if len(results) > 1:
raise AmbiguityError(
"There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
)
elif len(results) == 0:
            raise KeyError("There are no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
else:
return self.disk_migrations[results[0]]
def check_key(self, key, current_app):
if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
return key
# Special-case __first__, which means "the first migration" for
# migrated apps, and is ignored for unmigrated apps. It allows
# makemigrations to declare dependencies on apps before they even have
# migrations.
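# For example, a dependency of ('contenttypes', '__first__') resolves below to
# the root migration of the contenttypes app when that app has migrations, and
# is simply dropped when the app is unmigrated.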
if key[0] == current_app:
# Ignore __first__ references to the same app (#22325)
return
if key[0] in self.unmigrated_apps:
# This app isn't migrated, but something depends on it.
# The models will get auto-added into the state, though
# so we're fine.
return
if key[0] in self.migrated_apps:
try:
if key[1] == "__first__":
return list(self.graph.root_nodes(key[0]))[0]
else: # "__latest__"
return list(self.graph.leaf_nodes(key[0]))[0]
except IndexError:
if self.ignore_no_migrations:
return None
else:
raise ValueError("Dependency on app with no migrations: %s" % key[0])
raise ValueError("Dependency on unknown app: %s" % key[0])
def build_graph(self):
"""
Builds a migration dependency graph using both the disk and database.
You'll need to rebuild the graph if you apply migrations. This isn't
usually a problem as generally migration stuff runs in a one-shot process.
"""
# Load disk data
self.load_disk()
# Load database data
if self.connection is None:
self.applied_migrations = set()
else:
recorder = MigrationRecorder(self.connection)
self.applied_migrations = recorder.applied_migrations()
# Do a first pass to separate out replacing and non-replacing migrations
normal = {}
replacing = {}
for key, migration in self.disk_migrations.items():
if migration.replaces:
replacing[key] = migration
else:
normal[key] = migration
# Calculate reverse dependencies - i.e., for each migration, what depends on it?
# This is just for dependency re-pointing when applying replacements,
# so we ignore run_before here.
reverse_dependencies = {}
for key, migration in normal.items():
for parent in migration.dependencies:
reverse_dependencies.setdefault(parent, set()).add(key)
# Remember the possible replacements to generate more meaningful error
# messages
reverse_replacements = {}
for key, migration in replacing.items():
for replaced in migration.replaces:
reverse_replacements.setdefault(replaced, set()).add(key)
# Carry out replacements if we can - that is, if all replaced migrations
# are either unapplied or missing.
for key, migration in replacing.items():
# Ensure this replacement migration is not in applied_migrations
self.applied_migrations.discard(key)
# Do the check. We can replace if all our replace targets are
# applied, or if all of them are unapplied.
applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
can_replace = all(applied_statuses) or (not any(applied_statuses))
if not can_replace:
continue
# Alright, time to replace. Step through the replaced migrations
# and remove, repointing dependencies if needs be.
for replaced in migration.replaces:
if replaced in normal:
# We don't care if the replaced migration doesn't exist;
# the usage pattern here is to delete things after a while.
del normal[replaced]
for child_key in reverse_dependencies.get(replaced, set()):
if child_key in migration.replaces:
continue
# List of migrations whose dependency on `replaced` needs
# to be updated to a dependency on `key`.
to_update = []
# Child key may itself be replaced, in which case it might
# not be in `normal` anymore (depending on whether we've
# processed its replacement yet). If it's present, we go
# ahead and update it; it may be deleted later on if it is
# replaced, but there's no harm in updating it regardless.
if child_key in normal:
to_update.append(normal[child_key])
# If the child key is replaced, we update its replacement's
# dependencies too, if necessary. (We don't know if this
# replacement will actually take effect or not, but either
# way it's OK to update the replacing migration).
if child_key in reverse_replacements:
for replaces_child_key in reverse_replacements[child_key]:
if replaced in replacing[replaces_child_key].dependencies:
to_update.append(replacing[replaces_child_key])
# Actually perform the dependency update on all migrations
# that require it.
for migration_needing_update in to_update:
migration_needing_update.dependencies.remove(replaced)
migration_needing_update.dependencies.append(key)
normal[key] = migration
# Mark the replacement as applied if all its replaced ones are
if all(applied_statuses):
self.applied_migrations.add(key)
# Store the replacement migrations for later checks
self.replacements = replacing
# Finally, make a graph and load everything into it
self.graph = MigrationGraph()
for key, migration in normal.items():
self.graph.add_node(key, migration)
def _reraise_missing_dependency(migration, missing, exc):
"""
Checks if ``missing`` could have been replaced by any squash
            migration but wasn't because the squash migration was partially
applied before. In that case raise a more understandable exception.
#23556
"""
if missing in reverse_replacements:
candidates = reverse_replacements.get(missing, set())
is_replaced = any(candidate in self.graph.nodes for candidate in candidates)
if not is_replaced:
tries = ', '.join('%s.%s' % c for c in candidates)
exc_value = NodeNotFoundError(
"Migration {0} depends on nonexistent node ('{1}', '{2}'). "
"Django tried to replace migration {1}.{2} with any of [{3}] "
"but wasn't able to because some of the replaced migrations "
"are already applied.".format(
migration, missing[0], missing[1], tries
),
missing)
exc_value.__cause__ = exc
six.reraise(NodeNotFoundError, exc_value, sys.exc_info()[2])
raise exc
# Add all internal dependencies first to ensure __first__ dependencies
# find the correct root node.
for key, migration in normal.items():
for parent in migration.dependencies:
if parent[0] != key[0] or parent[1] == '__first__':
# Ignore __first__ references to the same app (#22325)
continue
try:
self.graph.add_dependency(migration, key, parent)
except NodeNotFoundError as e:
# Since we added "key" to the nodes before this implies
# "parent" is not in there. To make the raised exception
# more understandable we check if parent could have been
# replaced but hasn't (eg partially applied squashed
# migration)
_reraise_missing_dependency(migration, parent, e)
for key, migration in normal.items():
for parent in migration.dependencies:
if parent[0] == key[0]:
# Internal dependencies already added.
continue
parent = self.check_key(parent, key[0])
if parent is not None:
try:
self.graph.add_dependency(migration, key, parent)
except NodeNotFoundError as e:
# Since we added "key" to the nodes before this implies
# "parent" is not in there.
_reraise_missing_dependency(migration, parent, e)
for child in migration.run_before:
child = self.check_key(child, key[0])
if child is not None:
try:
self.graph.add_dependency(migration, child, key)
except NodeNotFoundError as e:
# Since we added "key" to the nodes before this implies
# "child" is not in there.
_reraise_missing_dependency(migration, child, e)
def detect_conflicts(self):
"""
Looks through the loaded graph and detects any conflicts - apps
with more than one leaf migration. Returns a dict of the app labels
that conflict with the migration names that conflict.
"""
seen_apps = {}
conflicting_apps = set()
for app_label, migration_name in self.graph.leaf_nodes():
if app_label in seen_apps:
conflicting_apps.add(app_label)
seen_apps.setdefault(app_label, set()).add(migration_name)
return {app_label: seen_apps[app_label] for app_label in conflicting_apps}
def project_state(self, nodes=None, at_end=True):
"""
Returns a ProjectState object representing the most recent state
that the migrations we loaded represent.
See graph.make_state for the meaning of "nodes" and "at_end"
"""
return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps))
|
bsd-3-clause
|
udrg/rpg_svo
|
svo_analysis/src/svo_analysis/analyse_dataset.py
|
17
|
1178
|
# -*- coding: utf-8 -*-
import associate
import numpy as np
import matplotlib.pyplot as plt
import yaml
def loadDataset(filename):
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
D = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64)
return D
dataset_dir = '/home/cforster/Datasets/SlamBenchmark/px4_r2'
trajectory_data = dataset_dir+'/groundtruth.txt'
stepsize = 10
# load dataset
data = loadDataset(trajectory_data)
n = data.shape[0]
steps = np.arange(0,n,stepsize)
# compute trajectory length
last_pos = data[0,1:4]
trajectory_length = 0
for i in steps[1:]:
new_pos = data[i,1:4]
trajectory_length += np.linalg.norm(new_pos-last_pos)
last_pos = new_pos
print 'trajectory length = ' + str(trajectory_length) + 'm'
print 'height mean = ' + str(np.mean(data[:,3])) + 'm'
print 'height median = ' + str(np.median(data[:,3])) + 'm'
print 'height std = ' + str(np.std(data[:,3])) + 'm'
print 'duration = ' + str(data[-1,0]-data[0,0]) + 's'
print 'speed = ' + str(trajectory_length/(data[-1,0]-data[0,0])) + 'm/s'
|
gpl-3.0
|
dev-coop/machine-learning
|
playing_around5.py
|
1
|
3420
|
'''
Working with my personal data again, using sk-learn
Thanks mchan on freenode ##machine-learning for guiding me on rolling window and such
'''
from sklearn import tree, linear_model, neighbors, cross_validation
import pandas as pd
import numpy
data_labels = ["Happiness", "Motivation", "Flexibility", "Strength", "Endurance", "Relationships"]
data_frame = pd.read_csv("personal_stats2.csv")
data_frame = data_frame[data_labels + ["Datetime"]]
# Apply rolling window to data
rolling_window_size = 16
series = data_frame.set_index('Datetime')
series = pd.rolling_window(series, rolling_window_size, 'boxcar')
data_frame = pd.DataFrame(series, columns=data_labels)
# Get 80% of our dataset
index_at_80_percent = int(len(data_frame) * .8)
# Get the first 80% as input and the following day as the target result
# Skip the first rolling_window_size rows, where the rolling window is not yet defined
training_input = data_frame[rolling_window_size:index_at_80_percent]
training_target = data_frame[rolling_window_size + 1:index_at_80_percent + 1]
#=============================================================================
# Uncomment to select a method
#=============================================================================
# Score: 437 with 'blackman' rolling window
# Score: 338 with 'boxcar' rolling window
# Score: 385 with 'hamming' rolling window
# Score: 391 with 'bartlett' rolling window
# Score: 426 with 'parzen' rolling window
# Score: 444 with 'bohman' rolling window
# Score: 476 with 'blackmanharris' rolling window
# Score: 475 with 'nuttall' rolling window
# Score: 406 with 'barthann' rolling window
clf = linear_model.LinearRegression(copy_X=True, normalize=False, fit_intercept=True)
#clf = linear_model.RANSACRegressor(linear_model.LinearRegression())
#clf = linear_model.Lasso()
#clf = neighbors.KNeighborsRegressor()
#clf = linear_model.LassoLars(alpha=.1)
#clf = linear_model.OrthogonalMatchingPursuit()
#clf = linear_model.ElasticNet(alpha=.5, l1_ratio=0.5, tol=0.01)
#clf = linear_model.PassiveAggressiveRegressor()
# Fit the selected method; very simple, it only takes the previous day's record into account
clf = clf.fit(training_input, training_target)
#=============================================================================
# Compare predictions to actual stats
#
# Uncomment to view!
#=============================================================================
# The final 20% of training data, current day as input next day as expected output
test_input = data_frame[index_at_80_percent + 1: len(data_frame) - 1]
test_target = data_frame[index_at_80_percent + 2:]
total_difference_less_than_5 = 0
total_difference_greater_than_15 = 0
total_difference = 0
for index, row in enumerate(test_input.values):
test_result = numpy.array([int(n) for n in clf.predict(row)[0]])
difference = sum(abs(x - y) for x, y in zip(test_result, test_target.values[index]))
total_difference += difference
print test_target.values[index]
print test_result
print "Difference:", difference
print ""
#print total_difference, difference
if difference < 5:
total_difference_less_than_5 += 1
if difference > 15:
total_difference_greater_than_15 += 1
print ""
print "Total differences < 5 -->", total_difference_less_than_5
print "Total differences > 15 -->", total_difference_greater_than_15
print "Total differences ==", total_difference
|
mit
|
jtux270/translate
|
ovirt/3.6_source/packaging/setup/ovirt_engine_setup/constants.py
|
2
|
14913
|
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Constants."""
import gettext
import os
import sys
from otopi import util
from . import config
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
def osetupattrsclass(o):
sys.modules[o.__module__].__dict__.setdefault(
'__osetup_attrs__', []
).append(o)
return o
class classproperty(property):
def __get__(self, cls, owner):
return classmethod(self.fget).__get__(None, owner)()
def osetupattrs(
answerfile=False,
summary=False,
description=None,
postinstallfile=False,
reconfigurable=False,
answerfile_condition=lambda env: True,
summary_condition=lambda env: True,
):
class decorator(classproperty):
def __init__(self, o):
super(decorator, self).__init__(o)
self.__osetup_attrs__ = dict(
answerfile=answerfile,
summary=summary,
description=description,
postinstallfile=postinstallfile,
reconfigurable=reconfigurable,
answerfile_condition=answerfile_condition,
summary_condition=summary_condition,
)
return decorator
@util.export
class FileLocations(object):
SYSCONFDIR = '/etc'
LOCALSTATEDIR = '/var'
DATADIR = '/usr/share'
OVIRT_SETUP_DATADIR = config.SETUP_DATADIR
OVIRT_SETUP_LOCALSTATEDIR = config.SETUP_LOCALSTATEDIR
OVIRT_SETUP_BINDIR = os.path.join(
OVIRT_SETUP_DATADIR,
'bin',
)
OVIRT_SETUP_LOGDIR = os.path.join(
config.SETUP_LOG,
'setup',
)
OVIRT_SETUP_SYSCONFDIR = config.SETUP_SYSCONFDIR
OVIRT_SETUP_DATADIR = config.SETUP_DATADIR
OVIRT_OVIRT_SETUP_LOG_PREFIX = 'ovirt-engine-setup'
OVIRT_OVIRT_REMOVE_LOG_PREFIX = 'ovirt-engine-remove'
OVIRT_OVIRT_RENAME_LOG_PREFIX = 'ovirt-engine-rename'
OVIRT_OVIRT_PROVISIONDB_LOG_PREFIX = 'ovirt-engine-provisiondb'
OVIRT_OVIRT_SETUP_CONFIG_FILE = config.ENGINE_SETUP_CONFIG
OVIRT_SETUP_OSINFO_REPOSITORY_DIR = os.path.join(
OVIRT_SETUP_SYSCONFDIR,
'osinfo.conf.d',
)
OVIRT_IPTABLES_EXAMPLE = os.path.join(
OVIRT_SETUP_SYSCONFDIR,
'iptables.example'
)
OVIRT_IPTABLES_DEFAULT = os.path.join(
OVIRT_SETUP_DATADIR,
'conf',
'iptables.default.in'
)
SYSCONFIG_IPTABLES = os.path.join(
SYSCONFDIR,
'sysconfig',
'iptables',
)
OVIRT_FIREWALLD_CONFIG = os.path.join(
OVIRT_SETUP_DATADIR,
'firewalld',
)
OVIRT_FIREWALLD_EXAMPLE_DIR = os.path.join(
OVIRT_SETUP_SYSCONFDIR,
'firewalld'
)
FIREWALLD_SERVICES_DIR = os.path.join(
SYSCONFDIR,
'firewalld',
'services',
)
OVIRT_SETUP_STATE_DIR = os.path.join(
OVIRT_SETUP_LOCALSTATEDIR,
'setup',
)
OVIRT_SETUP_ANSWERS_DIR = os.path.join(
OVIRT_SETUP_STATE_DIR,
'answers',
)
OVIRT_ENGINE_YUM_VERSIONLOCK = os.path.join(
SYSCONFDIR,
'yum',
'pluginconf.d',
'versionlock.list',
)
OVIRT_SETUP_POST_INSTALL_CONFIG = os.path.join(
'%s.d' % OVIRT_OVIRT_SETUP_CONFIG_FILE,
'20-setup-ovirt-post.conf'
)
VIRTIO_WIN_DIR = os.path.join(
DATADIR,
'virtio-win',
)
RHEV_GUEST_TOOLS_DIR = os.path.join(
DATADIR,
'rhev-guest-tools-iso',
)
OVIRT_GUEST_TOOLS_DIR = os.path.join(
DATADIR,
'ovirt-guest-tools-iso',
)
OVIRT_SETUP_UNINSTALL_DIR = os.path.join(
OVIRT_SETUP_SYSCONFDIR,
'uninstall.d'
)
@util.export
class Defaults(object):
DEFAULT_SYSTEM_USER_ENGINE = 'ovirt'
DEFAULT_SYSTEM_GROUP_ENGINE = 'ovirt'
DEFAULT_WEBSOCKET_PROXY_PORT = 6100
@util.export
class Stages(object):
NET_FIREWALL_MANAGER_AVAILABLE = 'osetup.net.firewallmanager.available'
CONFIG_PROTOCOLS_CUSTOMIZATION = 'osetup.config.protocols.customization'
CONFIG_APPLICATION_MODE_AVAILABLE = \
'osetup.config.applicationMode.available'
SSH_KEY_AVAILABLE = 'osetup.pki.ssh.available'
SYSTEM_SYSCTL_CONFIG_AVAILABLE = 'osetup.system.sysctl.available'
SYSTEM_HOSTILE_SERVICES_DETECTION = 'osetup.system.hostile.detection'
DISTRO_RPM_PACKAGE_UPDATE_CHECK = 'osetup.distro-rpm.package.update.check'
DIALOG_TITLES_S_FIREWALL = 'osetup.dialog.titles.firewall.start'
DIALOG_TITLES_S_MISC = 'osetup.dialog.titles.misc.start'
DIALOG_TITLES_S_NETWORK = 'osetup.dialog.titles.network.start'
DIALOG_TITLES_S_PACKAGES = 'osetup.dialog.titles.packaging.start'
DIALOG_TITLES_S_PRODUCT_OPTIONS = \
'osetup.dialog.titles.productoptions.start'
DIALOG_TITLES_S_SYSTEM = 'osetup.dialog.titles.system.start'
DIALOG_TITLES_E_FIREWALL = 'osetup.dialog.titles.firewall.end'
DIALOG_TITLES_E_MISC = 'osetup.dialog.titles.misc.end'
DIALOG_TITLES_E_NETWORK = 'osetup.dialog.titles.network.end'
DIALOG_TITLES_E_PACKAGES = 'osetup.dialog.titles.packages.end'
DIALOG_TITLES_E_PRODUCT_OPTIONS = 'osetup.dialog.titles.productoptions.end'
DIALOG_TITLES_E_SYSTEM = 'osetup.dialog.titles.system.end'
DIALOG_TITLES_S_SUMMARY = 'osetup.dialog.titles.summary.start'
DIALOG_TITLES_E_SUMMARY = 'osetup.dialog.titles.summary.end'
REMOVE_CUSTOMIZATION_COMMON = 'osetup.remove.customization.common'
REMOVE_CUSTOMIZATION_GROUPS = 'osetup.remove.customization.groups'
REMOVE_FIREWALLD_SERVICES = 'osetup.remove.firewalld.services'
KEEP_ONLY_VALID_FIREWALL_MANAGERS = \
'osetup.keep.only.valid.firewall.managers'
SETUP_SELINUX = 'osetup.setup.selinux'
@util.export
@util.codegen
class Const(object):
PACKAGE_NAME = config.PACKAGE_NAME
PACKAGE_VERSION = config.PACKAGE_VERSION
DISPLAY_VERSION = config.DISPLAY_VERSION
RPM_VERSION = config.RPM_VERSION
RPM_RELEASE = config.RPM_RELEASE
@classproperty
def RPM_LOCK_LIST_SUFFIXES(self):
return (
'',
'-backend',
'-dbscripts',
'-restapi',
'-tools',
'-userportal',
'-webadmin-portal',
)
FILE_GROUP_SECTION_PREFIX = 'file_group_'
ACTION_SETUP = 'setup'
ACTION_REMOVE = 'cleanup'
ACTION_RENAME = 'rename'
ACTION_PROVISIONDB = 'provisiondb'
FIREWALL_MANAGER_HUMAN = 'skip'
FIREWALL_MANAGER_IPTABLES = 'iptables'
FIREWALL_MANAGER_FIREWALLD = 'firewalld'
ISO_DOMAIN_NFS_DEFAULT_ACL_FORMAT = '{fqdn}(rw)'
REMOTE_ENGINE_SETUP_STYLE_AUTO_SSH = 'auto_ssh'
REMOTE_ENGINE_SETUP_STYLE_MANUAL_FILES = 'manual_files'
REMOTE_ENGINE_SETUP_STYLE_MANUAL_INLINE = 'manual_inline'
EXIT_CODE_REMOVE_WITHOUT_SETUP = 11
EXIT_CODE_PROVISIONING_NOT_SUPPORTED = 12
EXIT_CODE_PROVISIONING_EXISTING_RESOURCES_FOUND = 13
DWH_DOC_URI = (
'/docs/manual/en_US/html/Installation_Guide/'
'chap-History_and_Reports.html'
)
DWH_DOC_URL = 'http://www.ovirt.org/Ovirt_DWH'
REPORTS_DOC_URI = (
'/docs/manual/en_US/html/Installation_Guide/'
'chap-History_and_Reports.html'
)
REPORTS_DOC_URL = 'http://www.ovirt.org/Ovirt_Reports'
@util.export
@util.codegen
@osetupattrsclass
class CoreEnv(object):
OFFLINE_PACKAGER = 'OVESETUP_CORE/offlinePackager'
ANSWER_FILE = 'OVESETUP_CORE/answerFile'
DEVELOPER_MODE = 'OVESETUP_CORE/developerMode'
UNINSTALL_UNREMOVABLE_FILES = 'OVESETUP_CORE/uninstallUnremovableFiles'
GENERATE_POSTINSTALL = 'OVESETUP_CORE/generatePostInstall'
FILE_GROUP_PREFIX = 'OVESETUP_CORE_MODIFIED_FILE_GROUP/'
LINES_GROUP_PREFIX = 'OVESETUP_CORE_MODIFIED_LINES_GROUP/'
REGISTER_UNINSTALL_GROUPS = 'OVESETUP_CORE/registerUninstallGroups'
UPGRADE_SUPPORTED_VERSIONS = 'OVESETUP_CORE/upgradeSupportedVersions'
ACTION = 'OVESETUP_CORE/action'
@osetupattrs(
answerfile=True,
)
def REMOVE(self):
return 'OVESETUP_CORE/remove'
@osetupattrs(
postinstallfile=True,
)
def GENERATED_BY_VERSION(self):
return 'OVESETUP_CORE/generatedByVersion'
ORIGINAL_GENERATED_BY_VERSION = 'OVESETUP_CORE/originalGeneratedByVersion'
SETUP_ATTRS_MODULES = 'OVESETUP_CORE/setupAttributesModules'
REMOTE_ENGINE = 'OVESETUP_CORE/remoteEngine'
RECONFIGURE_OPTIONAL_COMPONENTS = \
'OVESETUP_CORE/reconfigureOptionalComponents'
@util.export
@util.codegen
@osetupattrsclass
class DocsEnv(object):
DOCS_LOCAL = 'OVESETUP_DOCS/docsAreLocal'
DWH_DOC_URL = 'OVESETUP_DOCS/dwhDocUrl'
REPORTS_DOC_URL = 'OVESETUP_DOCS/reportsDocUrl'
@util.export
@util.codegen
@osetupattrsclass
class DialogEnv(object):
@osetupattrs(
answerfile=True,
)
def CONFIRM_SETTINGS(self):
return 'OVESETUP_DIALOG/confirmSettings'
@util.export
@util.codegen
class NetEnv(object):
FIREWALLD_SERVICES = 'OVESETUP_NETWORK/firewalldServices'
FIREWALLD_SUBST = 'OVESETUP_NETWORK/firewalldSubst'
@util.export
@util.codegen
@osetupattrsclass
class SystemEnv(object):
USER_ENGINE = 'OVESETUP_SYSTEM/userEngine'
GROUP_ENGINE = 'OVESETUP_SYSTEM/groupEngine'
SELINUX_CONTEXTS = 'OVESETUP_SYSTEM/selinuxContexts'
SELINUX_RESTORE_PATHS = 'OVESETUP_SYSTEM/selinuxRestorePaths'
SELINUX_BOOLEANS = 'OVESETUP_SYSTEM/selinuxBooleans'
HOSTILE_SERVICES = 'OVESETUP_SYSTEM/hostileServices'
@util.export
@util.codegen
@osetupattrsclass
class ConfigEnv(object):
ADD_OVIRT_GLANCE_REPOSITORY = 'OVESETUP_CONFIG/addOvirtGlanceRepository'
@osetupattrs(
answerfile=True,
summary=True,
description=_('Application mode'),
)
def APPLICATION_MODE(self):
return 'OVESETUP_CONFIG/applicationMode'
@osetupattrs(
answerfile=True,
summary=False,
)
def STORAGE_TYPE(self):
return 'OVESETUP_CONFIG/storageType'
@osetupattrs(
answerfile=True,
summary=False,
)
def STORAGE_IS_LOCAL(self):
return 'OVESETUP_CONFIG/storageIsLocal'
@osetupattrs(
answerfile=True,
summary=True,
description=_('Default SAN wipe after delete'),
postinstallfile=True,
)
def SAN_WIPE_AFTER_DELETE(self):
return 'OVESETUP_CONFIG/sanWipeAfterDelete'
@osetupattrs(
answerfile=True,
summary=True,
description=_('Host FQDN'),
postinstallfile=True,
)
def FQDN(self):
return 'OVESETUP_CONFIG/fqdn'
@osetupattrs(
answerfile=True,
summary=True,
description=_('Firewall manager'),
postinstallfile=True,
)
def FIREWALL_MANAGER(self):
return 'OVESETUP_CONFIG/firewallManager'
@osetupattrs(
answerfile=True,
summary=True,
description=_('Update Firewall'),
)
def UPDATE_FIREWALL(self):
return 'OVESETUP_CONFIG/updateFirewall'
FIREWALL_MANAGERS = 'OVESETUP_CONFIG/firewallManagers'
@osetupattrs(
answerfile=True,
summary=False,
)
def FIREWALL_CHANGES_REVIEW(self):
return 'OVESETUP_CONFIG/firewallChangesReview'
VALID_FIREWALL_MANAGERS = 'OVESETUP_CONFIG/validFirewallManagers'
FQDN_REVERSE_VALIDATION = 'OVESETUP_CONFIG/fqdnReverseValidation'
FQDN_NON_LOOPBACK_VALIDATION = 'OVESETUP_CONFIG/fqdnNonLoopback'
REMOTE_ENGINE_SETUP_STYLES = 'OVESETUP_CONFIG/remoteEngineSetupStyles'
@osetupattrs(
answerfile=True,
)
def REMOTE_ENGINE_SETUP_STYLE(self):
return 'OVESETUP_CONFIG/remoteEngineSetupStyle'
@osetupattrs(
answerfile=True,
)
def REMOTE_ENGINE_HOST_SSH_PORT(self):
return 'OVESETUP_CONFIG/remoteEngineHostSshPort'
# Optional, used if supplied
REMOTE_ENGINE_HOST_CLIENT_KEY = 'OVESETUP_CONFIG/remoteEngineHostClientKey'
# Optional, used if supplied, currently only log if not there
REMOTE_ENGINE_HOST_KNOWN_HOSTS = \
'OVESETUP_CONFIG/remoteEngineHostKnownHosts'
@osetupattrs(
answerfile=True,
)
def REMOTE_ENGINE_HOST_ROOT_PASSWORD(self):
return 'OVESETUP_CONFIG/remoteEngineHostRootPassword'
ISO_PATHS_TO_UPLOAD = 'OVESETUP_CONFIG/isoPathsToUpload'
TOTAL_MEMORY_MB = 'OVESETUP_CONFIG/totalMemoryMB'
@util.export
@util.codegen
@osetupattrsclass
class RPMDistroEnv(object):
@osetupattrs(
answerfile=True,
summary=True,
description=_('Upgrade packages'),
)
def ENABLE_UPGRADE(self):
return 'OSETUP_RPMDISTRO/enableUpgrade'
@osetupattrs(
answerfile=True,
summary=True,
description=_('Require packages rollback'),
)
def REQUIRE_ROLLBACK(self):
return 'OSETUP_RPMDISTRO/requireRollback'
VERSION_LOCK_FILTER = 'OSETUP_RPMDISTRO/versionLockFilter'
VERSION_LOCK_APPLY = 'OSETUP_RPMDISTRO/versionLockApply'
PACKAGES_UPGRADE_LIST = 'OVESETUP_RPMDISTRO/packagesUpgradeList'
PACKAGES_SETUP = 'OVESETUP_RPMDISTRO/packagesSetup'
@util.export
@util.codegen
@osetupattrsclass
class RenameEnv(object):
@osetupattrs(
answerfile=True,
summary=True,
description=_('New FQDN'),
)
def FQDN(self):
return 'OSETUP_RENAME/fqdn'
FORCE_OVERWRITE = 'OSETUP_RENAME/forceOverwrite'
FORCE_IGNORE_AIA_IN_CA = 'OSETUP_RENAME/forceIgnoreAIAInCA'
FILES_TO_BE_MODIFIED = 'OVESETUP_CORE/filesToBeModified'
@util.export
@util.codegen
@osetupattrsclass
class RemoveEnv(object):
@osetupattrs(
answerfile=True,
)
def ASK_GROUPS(self):
return 'OVESETUP_REMOVE/confirmUninstallGroups'
@osetupattrs(
answerfile=True,
)
def REMOVE_GROUPS(self):
return 'OVESETUP_REMOVE/enabledFileGroups'
@osetupattrs(
answerfile=True,
)
def REMOVE_ALL(self):
return 'OVESETUP_REMOVE/removeAll'
@osetupattrs(
answerfile=True,
)
def REMOVE_CHANGED(self):
return 'OVESETUP_REMOVE/removeChanged'
@osetupattrs(
answerfile=True,
)
def REMOVE_OPTIONS(self):
return 'OVESETUP_REMOVE/removeOptions'
FILES_TO_REMOVE = 'OVESETUP_REMOVE/filesToRemove'
REMOVE_SPEC_OPTION_GROUP_LIST = 'OVESETUP_REMOVE/specOptionGroupList'
# vim: expandtab tabstop=4 shiftwidth=4
|
gpl-3.0
|
Hellrungj/CSC-412-Networking
|
Protocol_Buffers/venv/lib/python2.7/site-packages/setuptools/tests/test_easy_install.py
|
41
|
15704
|
"""Easy install Tests
"""
import sys
import os
import shutil
import tempfile
import unittest
import site
import contextlib
import textwrap
import tarfile
import logging
import distutils.core
from setuptools.compat import StringIO, BytesIO, urlparse
from setuptools.sandbox import run_setup, SandboxViolation
from setuptools.command.easy_install import (
easy_install, fix_jython_executable, get_script_args, nt_quote_arg)
from setuptools.command.easy_install import PthDistributions
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
from pkg_resources import working_set, VersionConflict
from pkg_resources import Distribution as PRDistribution
import setuptools.tests.server
import pkg_resources
from .py26compat import skipIf
class FakeDist(object):
def get_entry_map(self, group):
if group != 'console_scripts':
return {}
return {'name': 'ep'}
def as_requirement(self):
return 'spec'
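# Descriptive note: WANTED below is the expected text of the console-script
# wrapper that get_script_args() should generate for FakeDist's single
# 'console_scripts' entry point; test_get_script_args compares the generated
# script against it verbatim.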
WANTED = """\
#!%s
# EASY-INSTALL-ENTRY-SCRIPT: 'spec','console_scripts','name'
__requires__ = 'spec'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('spec', 'console_scripts', 'name')()
)
""" % nt_quote_arg(fix_jython_executable(sys.executable, ""))
SETUP_PY = """\
from setuptools import setup
setup(name='foo')
"""
class TestEasyInstallTest(unittest.TestCase):
def test_install_site_py(self):
dist = Distribution()
cmd = easy_install(dist)
cmd.sitepy_installed = False
cmd.install_dir = tempfile.mkdtemp()
try:
cmd.install_site_py()
sitepy = os.path.join(cmd.install_dir, 'site.py')
self.assertTrue(os.path.exists(sitepy))
finally:
shutil.rmtree(cmd.install_dir)
def test_get_script_args(self):
dist = FakeDist()
old_platform = sys.platform
try:
name, script = [i for i in next(get_script_args(dist))][0:2]
finally:
sys.platform = old_platform
self.assertEqual(script, WANTED)
def test_no_find_links(self):
        # new option '--no-find-links', which blocks find-links added at
        # the project level
dist = Distribution()
cmd = easy_install(dist)
cmd.check_pth_processing = lambda: True
cmd.no_find_links = True
cmd.find_links = ['link1', 'link2']
cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
cmd.args = ['ok']
cmd.ensure_finalized()
self.assertEqual(cmd.package_index.scanned_urls, {})
# let's try without it (default behavior)
cmd = easy_install(dist)
cmd.check_pth_processing = lambda: True
cmd.find_links = ['link1', 'link2']
cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
cmd.args = ['ok']
cmd.ensure_finalized()
keys = sorted(cmd.package_index.scanned_urls.keys())
self.assertEqual(keys, ['link1', 'link2'])
class TestPTHFileWriter(unittest.TestCase):
def test_add_from_cwd_site_sets_dirty(self):
'''a pth file manager should set dirty
if a distribution is in site but also the cwd
'''
pth = PthDistributions('does-not_exist', [os.getcwd()])
self.assertTrue(not pth.dirty)
pth.add(PRDistribution(os.getcwd()))
self.assertTrue(pth.dirty)
def test_add_from_site_is_ignored(self):
if os.name != 'nt':
location = '/test/location/does-not-have-to-exist'
else:
location = 'c:\\does_not_exist'
pth = PthDistributions('does-not_exist', [location, ])
self.assertTrue(not pth.dirty)
pth.add(PRDistribution(location))
self.assertTrue(not pth.dirty)
class TestUserInstallTest(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
setup = os.path.join(self.dir, 'setup.py')
f = open(setup, 'w')
f.write(SETUP_PY)
f.close()
self.old_cwd = os.getcwd()
os.chdir(self.dir)
self.old_enable_site = site.ENABLE_USER_SITE
self.old_file = easy_install_pkg.__file__
self.old_base = site.USER_BASE
site.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = tempfile.mkdtemp()
easy_install_pkg.__file__ = site.USER_SITE
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
site.ENABLE_USER_SITE = self.old_enable_site
easy_install_pkg.__file__ = self.old_file
def test_user_install_implied(self):
site.ENABLE_USER_SITE = True # disabled sometimes
        #XXX: replace with something meaningful
dist = Distribution()
dist.script_name = 'setup.py'
cmd = easy_install(dist)
cmd.args = ['py']
cmd.ensure_finalized()
self.assertTrue(cmd.user, 'user should be implied')
def test_multiproc_atexit(self):
try:
__import__('multiprocessing')
except ImportError:
# skip the test if multiprocessing is not available
return
log = logging.getLogger('test_easy_install')
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
log.info('this should not break')
def test_user_install_not_implied_without_usersite_enabled(self):
site.ENABLE_USER_SITE = False # usually enabled
        #XXX: replace with something meaningful
dist = Distribution()
dist.script_name = 'setup.py'
cmd = easy_install(dist)
cmd.args = ['py']
cmd.initialize_options()
        self.assertFalse(cmd.user, 'user should not be implied')
def test_local_index(self):
# make sure the local index is used
# when easy_install looks for installed
# packages
new_location = tempfile.mkdtemp()
target = tempfile.mkdtemp()
egg_file = os.path.join(new_location, 'foo-1.0.egg-info')
f = open(egg_file, 'w')
try:
f.write('Name: foo\n')
finally:
f.close()
sys.path.append(target)
old_ppath = os.environ.get('PYTHONPATH')
os.environ['PYTHONPATH'] = os.path.pathsep.join(sys.path)
try:
dist = Distribution()
dist.script_name = 'setup.py'
cmd = easy_install(dist)
cmd.install_dir = target
cmd.args = ['foo']
cmd.ensure_finalized()
cmd.local_index.scan([new_location])
res = cmd.easy_install('foo')
actual = os.path.normcase(os.path.realpath(res.location))
expected = os.path.normcase(os.path.realpath(new_location))
self.assertEqual(actual, expected)
finally:
sys.path.remove(target)
for basedir in [new_location, target, ]:
if not os.path.exists(basedir) or not os.path.isdir(basedir):
continue
try:
shutil.rmtree(basedir)
except:
pass
if old_ppath is not None:
os.environ['PYTHONPATH'] = old_ppath
else:
del os.environ['PYTHONPATH']
def test_setup_requires(self):
"""Regression test for Distribute issue #318
Ensure that a package with setup_requires can be installed when
setuptools is installed in the user site-packages without causing a
SandboxViolation.
"""
test_pkg = create_setup_requires_package(self.dir)
test_setup_py = os.path.join(test_pkg, 'setup.py')
try:
with quiet_context():
with reset_setup_stop_context():
run_setup(test_setup_py, ['install'])
except SandboxViolation:
self.fail('Installation caused SandboxViolation')
except IndexError:
# Test fails in some cases due to bugs in Python
# See https://bitbucket.org/pypa/setuptools/issue/201
pass
class TestSetupRequires(unittest.TestCase):
def test_setup_requires_honors_fetch_params(self):
"""
When easy_install installs a source distribution which specifies
setup_requires, it should honor the fetch parameters (such as
allow-hosts, index-url, and find-links).
"""
# set up a server which will simulate an alternate package index.
p_index = setuptools.tests.server.MockServer()
p_index.start()
netloc = 1
p_index_loc = urlparse(p_index.url)[netloc]
if p_index_loc.endswith(':0'):
# Some platforms (Jython) don't find a port to which to bind,
# so skip this test for them.
return
with quiet_context():
# create an sdist that has a build-time dependency.
with TestSetupRequires.create_sdist() as dist_file:
with tempdir_context() as temp_install_dir:
with environment_context(PYTHONPATH=temp_install_dir):
ei_params = ['--index-url', p_index.url,
'--allow-hosts', p_index_loc,
'--exclude-scripts', '--install-dir', temp_install_dir,
dist_file]
with reset_setup_stop_context():
with argv_context(['easy_install']):
# attempt to install the dist. It should fail because
# it doesn't exist.
self.assertRaises(SystemExit,
easy_install_pkg.main, ei_params)
# there should have been two or three requests to the server
# (three happens on Python 3.3a)
self.assertTrue(2 <= len(p_index.requests) <= 3)
self.assertEqual(p_index.requests[0].path, '/does-not-exist/')
@staticmethod
@contextlib.contextmanager
def create_sdist():
"""
Return an sdist with a setup_requires dependency (of something that
doesn't exist)
"""
with tempdir_context() as dir:
dist_path = os.path.join(dir, 'setuptools-test-fetcher-1.0.tar.gz')
make_trivial_sdist(
dist_path,
textwrap.dedent("""
import setuptools
setuptools.setup(
name="setuptools-test-fetcher",
version="1.0",
setup_requires = ['does-not-exist'],
)
""").lstrip())
yield dist_path
def test_setup_requires_overrides_version_conflict(self):
"""
Regression test for issue #323.
Ensures that a distribution's setup_requires requirements can still be
installed and used locally even if a conflicting version of that
requirement is already on the path.
"""
pr_state = pkg_resources.__getstate__()
fake_dist = PRDistribution('does-not-matter', project_name='foobar',
version='0.0')
working_set.add(fake_dist)
try:
with tempdir_context() as temp_dir:
test_pkg = create_setup_requires_package(temp_dir)
test_setup_py = os.path.join(test_pkg, 'setup.py')
with quiet_context() as (stdout, stderr):
with reset_setup_stop_context():
try:
# Don't even need to install the package, just
# running the setup.py at all is sufficient
run_setup(test_setup_py, ['--name'])
except VersionConflict:
self.fail('Installing setup.py requirements '
'caused a VersionConflict')
lines = stdout.readlines()
self.assertTrue(len(lines) > 0)
self.assertTrue(lines[-1].strip(), 'test_pkg')
finally:
pkg_resources.__setstate__(pr_state)
def create_setup_requires_package(path):
"""Creates a source tree under path for a trivial test package that has a
single requirement in setup_requires--a tarball for that requirement is
also created and added to the dependency_links argument.
"""
test_setup_attrs = {
'name': 'test_pkg', 'version': '0.0',
'setup_requires': ['foobar==0.1'],
'dependency_links': [os.path.abspath(path)]
}
test_pkg = os.path.join(path, 'test_pkg')
test_setup_py = os.path.join(test_pkg, 'setup.py')
os.mkdir(test_pkg)
f = open(test_setup_py, 'w')
f.write(textwrap.dedent("""\
import setuptools
setuptools.setup(**%r)
""" % test_setup_attrs))
f.close()
foobar_path = os.path.join(path, 'foobar-0.1.tar.gz')
make_trivial_sdist(
foobar_path,
textwrap.dedent("""\
import setuptools
setuptools.setup(
name='foobar',
version='0.1'
)
"""))
return test_pkg
def make_trivial_sdist(dist_path, setup_py):
"""Create a simple sdist tarball at dist_path, containing just a
setup.py, the contents of which are provided by the setup_py string.
"""
setup_py_file = tarfile.TarInfo(name='setup.py')
try:
# Python 3 (StringIO gets converted to io module)
MemFile = BytesIO
except AttributeError:
MemFile = StringIO
setup_py_bytes = MemFile(setup_py.encode('utf-8'))
setup_py_file.size = len(setup_py_bytes.getvalue())
dist = tarfile.open(dist_path, 'w:gz')
try:
dist.addfile(setup_py_file, fileobj=setup_py_bytes)
finally:
dist.close()
@contextlib.contextmanager
def tempdir_context(cd=lambda dir:None):
temp_dir = tempfile.mkdtemp()
orig_dir = os.getcwd()
try:
cd(temp_dir)
yield temp_dir
finally:
cd(orig_dir)
shutil.rmtree(temp_dir)
@contextlib.contextmanager
def environment_context(**updates):
old_env = os.environ.copy()
os.environ.update(updates)
try:
yield
finally:
for key in updates:
del os.environ[key]
os.environ.update(old_env)
@contextlib.contextmanager
def argv_context(repl):
old_argv = sys.argv[:]
sys.argv[:] = repl
yield
sys.argv[:] = old_argv
@contextlib.contextmanager
def reset_setup_stop_context():
"""
When the setuptools tests are run using setup.py test, and then
one wants to invoke another setup() command (such as easy_install)
within those tests, it's necessary to reset the global variable
in distutils.core so that the setup() command will run naturally.
"""
setup_stop_after = distutils.core._setup_stop_after
distutils.core._setup_stop_after = None
yield
distutils.core._setup_stop_after = setup_stop_after
@contextlib.contextmanager
def quiet_context():
"""
Redirect stdout/stderr to StringIO objects to prevent console output from
distutils commands.
"""
old_stdout = sys.stdout
old_stderr = sys.stderr
new_stdout = sys.stdout = StringIO()
new_stderr = sys.stderr = StringIO()
try:
yield new_stdout, new_stderr
finally:
new_stdout.seek(0)
new_stderr.seek(0)
sys.stdout = old_stdout
sys.stderr = old_stderr
|
gpl-3.0
|
jadedsurfer/genx
|
node_modules/grunt/node_modules/js-yaml/support/pyyaml-src/representer.py
|
238
|
13528
|
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from .error import *
from .nodes import *
import datetime, sys, copyreg, types, base64
class RepresenterError(YAMLError):
pass
class BaseRepresenter:
yaml_representers = {}
yaml_multi_representers = {}
def __init__(self, default_style=None, default_flow_style=None):
self.default_style = default_style
self.default_flow_style = default_flow_style
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent(self, data):
node = self.represent_data(data)
self.serialize(node)
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent_data(self, data):
if self.ignore_aliases(data):
self.alias_key = None
else:
self.alias_key = id(data)
if self.alias_key is not None:
if self.alias_key in self.represented_objects:
node = self.represented_objects[self.alias_key]
#if node is None:
# raise RepresenterError("recursive objects are not allowed: %r" % data)
return node
#self.represented_objects[alias_key] = None
self.object_keeper.append(data)
data_types = type(data).__mro__
if data_types[0] in self.yaml_representers:
node = self.yaml_representers[data_types[0]](self, data)
else:
for data_type in data_types:
if data_type in self.yaml_multi_representers:
node = self.yaml_multi_representers[data_type](self, data)
break
else:
if None in self.yaml_multi_representers:
node = self.yaml_multi_representers[None](self, data)
elif None in self.yaml_representers:
node = self.yaml_representers[None](self, data)
else:
node = ScalarNode(None, str(data))
#if alias_key is not None:
# self.represented_objects[alias_key] = node
return node
@classmethod
def add_representer(cls, data_type, representer):
if not 'yaml_representers' in cls.__dict__:
cls.yaml_representers = cls.yaml_representers.copy()
cls.yaml_representers[data_type] = representer
@classmethod
def add_multi_representer(cls, data_type, representer):
if not 'yaml_multi_representers' in cls.__dict__:
cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
cls.yaml_multi_representers[data_type] = representer
def represent_scalar(self, tag, value, style=None):
if style is None:
style = self.default_style
node = ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
def represent_sequence(self, tag, sequence, flow_style=None):
value = []
node = SequenceNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
for item in sequence:
node_item = self.represent_data(item)
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_mapping(self, tag, mapping, flow_style=None):
value = []
node = MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = list(mapping.items())
try:
mapping = sorted(mapping)
except TypeError:
pass
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def ignore_aliases(self, data):
return False
class SafeRepresenter(BaseRepresenter):
def ignore_aliases(self, data):
if data in [None, ()]:
return True
if isinstance(data, (str, bytes, bool, int, float)):
return True
def represent_none(self, data):
return self.represent_scalar('tag:yaml.org,2002:null', 'null')
def represent_str(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', data)
def represent_binary(self, data):
if hasattr(base64, 'encodebytes'):
data = base64.encodebytes(data).decode('ascii')
else:
data = base64.encodestring(data).decode('ascii')
return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
def represent_bool(self, data):
if data:
value = 'true'
else:
value = 'false'
return self.represent_scalar('tag:yaml.org,2002:bool', value)
def represent_int(self, data):
return self.represent_scalar('tag:yaml.org,2002:int', str(data))
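    # Note: the loop below derives the platform's float infinity without
    # importing math -- repeatedly squaring 1e300 overflows to inf, at which
    # point inf * inf == inf and repr() stops changing.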
inf_value = 1e300
while repr(inf_value) != repr(inf_value*inf_value):
inf_value *= inf_value
def represent_float(self, data):
if data != data or (data == 0.0 and data == 1.0):
value = '.nan'
elif data == self.inf_value:
value = '.inf'
elif data == -self.inf_value:
value = '-.inf'
else:
value = repr(data).lower()
# Note that in some cases `repr(data)` represents a float number
# without the decimal parts. For instance:
# >>> repr(1e17)
# '1e17'
# Unfortunately, this is not a valid float representation according
# to the definition of the `!!float` tag. We fix this by adding
# '.0' before the 'e' symbol.
if '.' not in value and 'e' in value:
value = value.replace('e', '.0e', 1)
return self.represent_scalar('tag:yaml.org,2002:float', value)
def represent_list(self, data):
#pairs = (len(data) > 0 and isinstance(data, list))
#if pairs:
# for item in data:
# if not isinstance(item, tuple) or len(item) != 2:
# pairs = False
# break
#if not pairs:
return self.represent_sequence('tag:yaml.org,2002:seq', data)
#value = []
#for item_key, item_value in data:
# value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
# [(item_key, item_value)]))
#return SequenceNode(u'tag:yaml.org,2002:pairs', value)
def represent_dict(self, data):
return self.represent_mapping('tag:yaml.org,2002:map', data)
def represent_set(self, data):
value = {}
for key in data:
value[key] = None
return self.represent_mapping('tag:yaml.org,2002:set', value)
def represent_date(self, data):
value = data.isoformat()
return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
def represent_datetime(self, data):
value = data.isoformat(' ')
return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
def represent_yaml_object(self, tag, data, cls, flow_style=None):
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__.copy()
return self.represent_mapping(tag, state, flow_style=flow_style)
def represent_undefined(self, data):
raise RepresenterError("cannot represent an object: %s" % data)
SafeRepresenter.add_representer(type(None),
SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
SafeRepresenter.represent_str)
SafeRepresenter.add_representer(bytes,
SafeRepresenter.represent_binary)
SafeRepresenter.add_representer(bool,
SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
SafeRepresenter.represent_int)
SafeRepresenter.add_representer(float,
SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
def represent_complex(self, data):
if data.imag == 0.0:
data = '%r' % data.real
elif data.real == 0.0:
data = '%rj' % data.imag
elif data.imag > 0:
data = '%r+%rj' % (data.real, data.imag)
else:
data = '%r%rj' % (data.real, data.imag)
return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
def represent_tuple(self, data):
return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
def represent_name(self, data):
name = '%s.%s' % (data.__module__, data.__name__)
return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
def represent_module(self, data):
return self.represent_scalar(
'tag:yaml.org,2002:python/module:'+data.__name__, '')
def represent_object(self, data):
# We use __reduce__ API to save the data. data.__reduce__ returns
# a tuple of length 2-5:
# (function, args, state, listitems, dictitems)
# For reconstructing, we calls function(*args), then set its state,
# listitems, and dictitems if they are not None.
# A special case is when function.__name__ == '__newobj__'. In this
# case we create the object with args[0].__new__(*args).
# Another special case is when __reduce__ returns a string - we don't
# support it.
# We produce a !!python/object, !!python/object/new or
# !!python/object/apply node.
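        # Illustrative example (not from the original source): for a plain
        # new-style instance ``p = Point(1, 2)``, ``p.__reduce_ex__(2)``
        # typically returns
        #     (copyreg.__newobj__, (Point,), {'x': 1, 'y': 2})
        # so function.__name__ is '__newobj__', args becomes (), state is the
        # instance __dict__, and the branch below emits a
        # !!python/object:<module>.Point mapping of that state.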
cls = type(data)
if cls in copyreg.dispatch_table:
reduce = copyreg.dispatch_table[cls](data)
elif hasattr(data, '__reduce_ex__'):
reduce = data.__reduce_ex__(2)
elif hasattr(data, '__reduce__'):
reduce = data.__reduce__()
else:
raise RepresenterError("cannot represent object: %r" % data)
reduce = (list(reduce)+[None]*5)[:5]
function, args, state, listitems, dictitems = reduce
args = list(args)
if state is None:
state = {}
if listitems is not None:
listitems = list(listitems)
if dictitems is not None:
dictitems = dict(dictitems)
if function.__name__ == '__newobj__':
function = args[0]
args = args[1:]
tag = 'tag:yaml.org,2002:python/object/new:'
newobj = True
else:
tag = 'tag:yaml.org,2002:python/object/apply:'
newobj = False
function_name = '%s.%s' % (function.__module__, function.__name__)
if not args and not listitems and not dictitems \
and isinstance(state, dict) and newobj:
return self.represent_mapping(
'tag:yaml.org,2002:python/object:'+function_name, state)
if not listitems and not dictitems \
and isinstance(state, dict) and not state:
return self.represent_sequence(tag+function_name, args)
value = {}
if args:
value['args'] = args
if state or not isinstance(state, dict):
value['state'] = state
if listitems:
value['listitems'] = listitems
if dictitems:
value['dictitems'] = dictitems
return self.represent_mapping(tag+function_name, value)
Representer.add_representer(complex,
Representer.represent_complex)
Representer.add_representer(tuple,
Representer.represent_tuple)
Representer.add_representer(type,
Representer.represent_name)
Representer.add_representer(types.FunctionType,
Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
Representer.represent_name)
Representer.add_representer(types.ModuleType,
Representer.represent_module)
Representer.add_multi_representer(object,
Representer.represent_object)
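# Illustrative sketch (not part of the original module; Point and
# point_representer are hypothetical names): registering a representer for a
# user-defined type follows the same pattern as the built-in registrations
# above -- the callback receives the representer instance and the value:
#
#     class Point:
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#
#     def point_representer(representer, data):
#         return representer.represent_mapping(
#             'tag:yaml.org,2002:map', {'x': data.x, 'y': data.y})
#
#     Representer.add_representer(Point, point_representer)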
|
mit
|
kirachen/canvas
|
icl_data_import_export/sync_enrollment.py
|
1
|
2523
|
import sys
import requests, json
canvas_domain = "http://146.169.47.160:3000/"
canvas_access_token = "2Zg8KXWMNmt4LTukfYCJ1E50KoV4gJ7gGrydLrATH3BOgc1PJZ0g4L4CUuU9U8oP" #This never expires for this domain
data_dir = "data_dump/enrollments.txt"
def sync_enrollment():
print "syncing"
enrolled = read_enrollment()
change = False
for course in enrolled:
print "course: " + course
curr_enrollment = get_enrollment(course)
if len(curr_enrollment) != 0:
enrollment = enrolled[course]
for e in curr_enrollment:
if e not in enrollment:
print e + " is not enrolled in " + str(course) + " anymore"
delete_enrollment(curr_enrollment[e])
change = True
if not change:
print "No change detected"
def delete_enrollment((enrollment_id, course_id)):
req_url = canvas_domain + "api/v1/courses/" + str(course_id) + "/enrollments/" + str(enrollment_id)
payload = {"task":"delete"}
headers = {'Authorization': 'Bearer ' + canvas_access_token}
res = requests.delete(req_url, headers=headers, data=payload)
if res.status_code == 200:
print "enrollment " + str(enrollment_id) + " for course " + str(course_id) + " has been successfully deleted"
else:
print "failed deleting enrollment " + str(enrollment_id) + " for course " + str(course_id)
def get_enrollment(course_code):
enrollment = {}
req_url = canvas_domain + "api/v1/accounts/1/course_code/" + str(course_code) + "/student_enrollments"
headers = {'Authorization': 'Bearer ' + canvas_access_token}
res = requests.get(req_url, headers=headers)
for e in res.json():
enrollment[e["user"]["login_id"]] = (e["id"], e["course_id"])
return enrollment
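# Note (the file format is inferred from the parsing below, so treat it as an
# assumption): the enrollment dump is expected to be tab-separated with one
# row per (course, student) -- column 0 is the course code, column 1 the
# student login id, and column 2 an integer status where values >= 2 count as
# enrolled. Rows are assumed to be grouped by course code.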
def read_enrollment():
students = []
enrolled = {}
with open(data_dir, "rb") as file:
content = file.readlines()
content = [x.strip('\n') for x in content]
first_line = content[0]
course_code = first_line.split("\t")[0]
for line in content:
data = line.split("\t")
if int(data[2]) >= 2:
if data[0] == course_code:
students.append(data[1])
else:
enrolled[course_code] = students
course_code = data[0]
students = []
students.append(data[1])
    enrolled[course_code] = students  # also keep the last course's students
    return enrolled
if __name__ == "__main__":
sync_enrollment()
|
agpl-3.0
|
vladikr/nova_drafts
|
nova/objects/fixed_ip.py
|
2
|
11566
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.openstack.common import timeutils
from nova import utils
FIXED_IP_OPTIONAL_ATTRS = ['instance', 'network']
class FixedIP(obj_base.NovaPersistentObject, obj_base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added virtual_interface field
# Version 1.2: Instance version 1.14
# Version 1.3: Instance 1.15
# Version 1.4: Added default_route field
VERSION = '1.4'
fields = {
'id': fields.IntegerField(),
'address': fields.IPV4AndV6AddressField(),
'network_id': fields.IntegerField(nullable=True),
'virtual_interface_id': fields.IntegerField(nullable=True),
'instance_uuid': fields.UUIDField(nullable=True),
'allocated': fields.BooleanField(),
'leased': fields.BooleanField(),
'reserved': fields.BooleanField(),
'host': fields.StringField(nullable=True),
'default_route': fields.BooleanField(),
'instance': fields.ObjectField('Instance', nullable=True),
'network': fields.ObjectField('Network', nullable=True),
'virtual_interface': fields.ObjectField('VirtualInterface',
nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
target_version = utils.convert_version_to_tuple(target_version)
if target_version < (1, 4) and 'default_route' in primitive:
del primitive['default_route']
if target_version < (1, 3) and 'instance' in primitive:
self.instance.obj_make_compatible(primitive['instance'], '1.14')
primitive['instance']['nova_object.version'] = '1.14'
if target_version < (1, 2) and 'instance' in primitive:
self.instance.obj_make_compatible(primitive['instance'], '1.13')
primitive['instance']['nova_object.version'] = '1.13'
@property
def floating_ips(self):
return objects.FloatingIPList.get_by_fixed_ip_id(self._context,
self.id)
@staticmethod
def _from_db_object(context, fixedip, db_fixedip, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
for field in fixedip.fields:
if field in ('virtual_interface', 'default_route'):
# NOTE(danms): These fields are only set when doing a
# FixedIPList.get_by_network() because it's a relatively
# special-case thing, so skip them here
continue
if field not in FIXED_IP_OPTIONAL_ATTRS:
fixedip[field] = db_fixedip[field]
# NOTE(danms): Instance could be deleted, and thus None
if 'instance' in expected_attrs:
fixedip.instance = objects.Instance._from_db_object(
context,
objects.Instance(context),
db_fixedip['instance']) if db_fixedip['instance'] else None
if 'network' in expected_attrs:
fixedip.network = objects.Network._from_db_object(
context, objects.Network(context), db_fixedip['network'])
fixedip._context = context
fixedip.obj_reset_changes()
return fixedip
@obj_base.remotable_classmethod
def get_by_id(cls, context, id, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
get_network = 'network' in expected_attrs
db_fixedip = db.fixed_ip_get(context, id, get_network=get_network)
return cls._from_db_object(context, cls(context), db_fixedip,
expected_attrs)
@obj_base.remotable_classmethod
def get_by_address(cls, context, address, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
db_fixedip = db.fixed_ip_get_by_address(context, str(address),
columns_to_join=expected_attrs)
return cls._from_db_object(context, cls(context), db_fixedip,
expected_attrs)
@obj_base.remotable_classmethod
def get_by_floating_address(cls, context, address):
db_fixedip = db.fixed_ip_get_by_floating_address(context, str(address))
if db_fixedip is not None:
return cls._from_db_object(context, cls(context), db_fixedip)
@obj_base.remotable_classmethod
def get_by_network_and_host(cls, context, network_id, host):
db_fixedip = db.fixed_ip_get_by_network_host(context, network_id, host)
return cls._from_db_object(context, cls(context), db_fixedip)
@obj_base.remotable_classmethod
def associate(cls, context, address, instance_uuid, network_id=None,
reserved=False):
db_fixedip = db.fixed_ip_associate(context, address, instance_uuid,
network_id=network_id,
reserved=reserved)
return cls._from_db_object(context, cls(context), db_fixedip)
@obj_base.remotable_classmethod
def associate_pool(cls, context, network_id, instance_uuid=None,
host=None):
db_fixedip = db.fixed_ip_associate_pool(context, network_id,
instance_uuid=instance_uuid,
host=host)
return cls._from_db_object(context, cls(context), db_fixedip)
@obj_base.remotable_classmethod
def disassociate_by_address(cls, context, address):
db.fixed_ip_disassociate(context, address)
@obj_base.remotable_classmethod
def _disassociate_all_by_timeout(cls, context, host, time_str):
time = timeutils.parse_isotime(time_str)
return db.fixed_ip_disassociate_all_by_timeout(context, host, time)
@classmethod
def disassociate_all_by_timeout(cls, context, host, time):
return cls._disassociate_all_by_timeout(context, host,
timeutils.isotime(time))
@obj_base.remotable
def create(self, context):
updates = self.obj_get_changes()
if 'id' in updates:
raise exception.ObjectActionError(action='create',
reason='already created')
if 'address' in updates:
updates['address'] = str(updates['address'])
db_fixedip = db.fixed_ip_create(context, updates)
self._from_db_object(context, self, db_fixedip)
@obj_base.remotable
def save(self, context):
updates = self.obj_get_changes()
if 'address' in updates:
raise exception.ObjectActionError(action='save',
reason='address is not mutable')
db.fixed_ip_update(context, str(self.address), updates)
self.obj_reset_changes()
@obj_base.remotable
def disassociate(self, context):
db.fixed_ip_disassociate(context, str(self.address))
self.instance_uuid = None
self.instance = None
self.obj_reset_changes(['instance_uuid', 'instance'])
class FixedIPList(obj_base.ObjectListBase, obj_base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added get_by_network()
# Version 1.2: FixedIP <= version 1.2
# Version 1.3: FixedIP <= version 1.3
# Version 1.4: FixedIP <= version 1.4
VERSION = '1.4'
fields = {
'objects': fields.ListOfObjectsField('FixedIP'),
}
child_versions = {
'1.0': '1.0',
'1.1': '1.1',
'1.2': '1.2',
'1.3': '1.3',
'1.4': '1.4',
}
@obj_base.remotable_classmethod
def get_all(cls, context):
db_fixedips = db.fixed_ip_get_all(context)
return obj_base.obj_make_list(context, cls(context),
objects.FixedIP, db_fixedips)
@obj_base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_fixedips = db.fixed_ip_get_by_instance(context, instance_uuid)
return obj_base.obj_make_list(context, cls(context),
objects.FixedIP, db_fixedips)
@obj_base.remotable_classmethod
def get_by_host(cls, context, host):
db_fixedips = db.fixed_ip_get_by_host(context, host)
return obj_base.obj_make_list(context, cls(context),
objects.FixedIP, db_fixedips)
@obj_base.remotable_classmethod
def get_by_virtual_interface_id(cls, context, vif_id):
db_fixedips = db.fixed_ips_by_virtual_interface(context, vif_id)
return obj_base.obj_make_list(context, cls(context),
objects.FixedIP, db_fixedips)
@obj_base.remotable_classmethod
def get_by_network(cls, context, network, host=None):
ipinfo = db.network_get_associated_fixed_ips(context,
network['id'],
host=host)
if not ipinfo:
return []
fips = cls(context=context, objects=[])
for info in ipinfo:
inst = objects.Instance(context=context,
uuid=info['instance_uuid'],
hostname=info['instance_hostname'],
created_at=info['instance_created'],
updated_at=info['instance_updated'])
vif = objects.VirtualInterface(context=context,
id=info['vif_id'],
address=info['vif_address'])
fip = objects.FixedIP(context=context,
address=info['address'],
instance_uuid=info['instance_uuid'],
network_id=info['network_id'],
virtual_interface_id=info['vif_id'],
allocated=info['allocated'],
leased=info['leased'],
default_route=info['default_route'],
instance=inst,
virtual_interface=vif)
fips.objects.append(fip)
fips.obj_reset_changes()
return fips
@obj_base.remotable_classmethod
    def bulk_create(cls, context, fixed_ips):
ips = []
for fixedip in fixed_ips:
ip = obj_base.obj_to_primitive(fixedip)
if 'id' in ip:
raise exception.ObjectActionError(action='create',
reason='already created')
ips.append(ip)
db.fixed_ip_bulk_create(context, ips)
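# Illustrative sketch (not part of the original module; assumes nova is
# importable): obj_make_compatible() above strips fields unknown to older
# consumers, e.g. 'default_route', which was added in version 1.4:
#
#     fip = FixedIP(address='192.168.0.10', default_route=True)
#     primitive = {'address': '192.168.0.10', 'default_route': True}
#     fip.obj_make_compatible(primitive, '1.3')
#     assert 'default_route' not in primitive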
|
apache-2.0
|
renyi533/tensorflow
|
tensorflow/examples/speech_commands/freeze_test.py
|
12
|
4130
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data input for speech commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.examples.speech_commands import freeze
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class FreezeTest(test.TestCase):
@test_util.run_deprecated_v1
def testCreateInferenceGraphWithMfcc(self):
with self.cached_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=40,
model_architecture='conv',
preprocess='mfcc')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
ops = [node.op for node in sess.graph_def.node]
self.assertEqual(1, ops.count('Mfcc'))
@test_util.run_deprecated_v1
def testCreateInferenceGraphWithoutMfcc(self):
with self.cached_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=40,
model_architecture='conv',
preprocess='average')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
ops = [node.op for node in sess.graph_def.node]
self.assertEqual(0, ops.count('Mfcc'))
@test_util.run_deprecated_v1
def testCreateInferenceGraphWithMicro(self):
with self.cached_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=40,
model_architecture='conv',
preprocess='micro')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
@test_util.run_deprecated_v1
def testFeatureBinCount(self):
with self.cached_session() as sess:
freeze.create_inference_graph(
wanted_words='a,b,c,d',
sample_rate=16000,
clip_duration_ms=1000.0,
clip_stride_ms=30.0,
window_size_ms=30.0,
window_stride_ms=10.0,
feature_bin_count=80,
model_architecture='conv',
preprocess='average')
self.assertIsNotNone(sess.graph.get_tensor_by_name('wav_data:0'))
self.assertIsNotNone(
sess.graph.get_tensor_by_name('decoded_sample_data:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('labels_softmax:0'))
ops = [node.op for node in sess.graph_def.node]
self.assertEqual(0, ops.count('Mfcc'))
if __name__ == '__main__':
test.main()
|
apache-2.0
|
gfyoung/pandas
|
pandas/tests/indexes/test_setops.py
|
1
|
16997
|
"""
The tests in this package are to ensure the proper resultant dtypes of
set operations.
"""
import numpy as np
import pytest
from pandas.core.dtypes.common import is_dtype_equal
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
Float64Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
TimedeltaIndex,
UInt64Index,
)
import pandas._testing as tm
from pandas.api.types import is_datetime64tz_dtype, pandas_dtype
COMPATIBLE_INCONSISTENT_PAIRS = {
(Int64Index, RangeIndex): (tm.makeIntIndex, tm.makeRangeIndex),
(Float64Index, Int64Index): (tm.makeFloatIndex, tm.makeIntIndex),
(Float64Index, RangeIndex): (tm.makeFloatIndex, tm.makeIntIndex),
(Float64Index, UInt64Index): (tm.makeFloatIndex, tm.makeUIntIndex),
}
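# Illustrative example (not part of the original suite): the dtype promotion
# checked by test_union_dtypes below behaves roughly like
#
#     >>> import pandas as pd
#     >>> pd.Index([1, 2], dtype="int64").union(pd.Index([1.5], dtype="float64")).dtype
#     dtype('float64')
#
# while mixes with no common numeric dtype (e.g. int64 with uint64, or
# datetime64[ns] with any numeric dtype) fall back to object, as the
# parametrization in test_union_dtypes enumerates.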
def test_union_same_types(index):
# Union with a non-unique, non-monotonic index raises error
# Only needed for bool index factory
idx1 = index.sort_values()
idx2 = index.sort_values()
assert idx1.union(idx2).dtype == idx1.dtype
def test_union_different_types(request, index, index_fixture2):
# This test only considers combinations of indices
# GH 23525
idx1, idx2 = index, index_fixture2
type_pair = tuple(sorted([type(idx1), type(idx2)], key=lambda x: str(x)))
if type_pair in COMPATIBLE_INCONSISTENT_PAIRS:
request.node.add_marker(
pytest.mark.xfail(reason="This test only considers non compatible indexes.")
)
if any(isinstance(idx, pd.MultiIndex) for idx in (idx1, idx2)):
pytest.skip("This test doesn't consider multiindixes.")
if is_dtype_equal(idx1.dtype, idx2.dtype):
pytest.skip("This test only considers non matching dtypes.")
# A union with a CategoricalIndex (even as dtype('O')) and a
# non-CategoricalIndex can only be made if both indices are monotonic.
# This is true before this PR as well.
# Union with a non-unique, non-monotonic index raises error
# This applies to the boolean index
idx1 = idx1.sort_values()
idx2 = idx2.sort_values()
assert idx1.union(idx2).dtype == np.dtype("O")
assert idx2.union(idx1).dtype == np.dtype("O")
@pytest.mark.parametrize("idx_fact1,idx_fact2", COMPATIBLE_INCONSISTENT_PAIRS.values())
def test_compatible_inconsistent_pairs(idx_fact1, idx_fact2):
# GH 23525
idx1 = idx_fact1(10)
idx2 = idx_fact2(20)
res1 = idx1.union(idx2)
res2 = idx2.union(idx1)
assert res1.dtype in (idx1.dtype, idx2.dtype)
assert res2.dtype in (idx1.dtype, idx2.dtype)
@pytest.mark.parametrize(
"left, right, expected",
[
("int64", "int64", "int64"),
("int64", "uint64", "object"),
("int64", "float64", "float64"),
("uint64", "float64", "float64"),
("uint64", "uint64", "uint64"),
("float64", "float64", "float64"),
("datetime64[ns]", "int64", "object"),
("datetime64[ns]", "uint64", "object"),
("datetime64[ns]", "float64", "object"),
("datetime64[ns, CET]", "int64", "object"),
("datetime64[ns, CET]", "uint64", "object"),
("datetime64[ns, CET]", "float64", "object"),
("Period[D]", "int64", "object"),
("Period[D]", "uint64", "object"),
("Period[D]", "float64", "object"),
],
)
@pytest.mark.parametrize("names", [("foo", "foo", "foo"), ("foo", "bar", None)])
def test_union_dtypes(left, right, expected, names):
left = pandas_dtype(left)
right = pandas_dtype(right)
a = pd.Index([], dtype=left, name=names[0])
b = pd.Index([], dtype=right, name=names[1])
result = a.union(b)
assert result.dtype == expected
assert result.name == names[2]
# Testing name retention
# TODO: pin down desired dtype; do we want it to be commutative?
result = a.intersection(b)
assert result.name == names[2]
def test_dunder_inplace_setops_deprecated(index):
# GH#37374 these will become logical ops, not setops
with tm.assert_produces_warning(FutureWarning):
index |= index
with tm.assert_produces_warning(FutureWarning):
index &= index
with tm.assert_produces_warning(FutureWarning):
index ^= index
@pytest.mark.parametrize("values", [[1, 2, 2, 3], [3, 3]])
def test_intersection_duplicates(values):
# GH#31326
a = pd.Index(values)
b = pd.Index([3, 3])
result = a.intersection(b)
expected = pd.Index([3])
tm.assert_index_equal(result, expected)
class TestSetOps:
# Set operation tests shared by all indexes in the `index` fixture
@pytest.mark.parametrize("case", [0.5, "xxx"])
@pytest.mark.parametrize(
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_set_ops_error_cases(self, case, method, index):
# non-iterable input
msg = "Input must be Index or array-like"
with pytest.raises(TypeError, match=msg):
getattr(index, method)(case)
def test_intersection_base(self, index):
if isinstance(index, CategoricalIndex):
return
first = index[:5]
second = index[:3]
intersect = first.intersection(second)
assert tm.equalContents(intersect, second)
if is_datetime64tz_dtype(index.dtype):
# The second.values below will drop tz, so the rest of this test
# is not applicable.
return
# GH#10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
assert tm.equalContents(result, second)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.intersection([1, 2, 3])
def test_union_base(self, index):
first = index[3:]
second = index[:5]
everything = index
union = first.union(second)
assert tm.equalContents(union, everything)
if is_datetime64tz_dtype(index.dtype):
# The second.values below will drop tz, so the rest of this test
# is not applicable.
return
# GH#10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
if not isinstance(index, CategoricalIndex):
result = first.union(case)
assert tm.equalContents(result, everything), (
result,
everything,
type(case),
)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.union([1, 2, 3])
def test_difference_base(self, sort, index):
first = index[2:]
second = index[:4]
if isinstance(index, CategoricalIndex) or index.is_boolean():
answer = []
else:
answer = index[4:]
result = first.difference(second, sort)
assert tm.equalContents(result, answer)
# GH#10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
assert type(result) == type(answer)
tm.assert_numpy_array_equal(
result.sort_values().asi8, answer.sort_values().asi8
)
else:
result = first.difference(case, sort)
assert tm.equalContents(result, answer)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.difference([1, 2, 3], sort)
def test_symmetric_difference(self, index):
if isinstance(index, CategoricalIndex):
return
if len(index) < 2:
return
if index[0] in index[1:] or index[-1] in index[:-1]:
# index fixture has e.g. an index of bools that does not satisfy this,
# another with [0, 0, 1, 1, 2, 2]
return
first = index[1:]
second = index[:-1]
answer = index[[0, -1]]
result = first.symmetric_difference(second)
assert tm.equalContents(result, answer)
# GH#10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.symmetric_difference(case)
if is_datetime64tz_dtype(first):
# second.values casts to tznaive
expected = first.union(case)
tm.assert_index_equal(result, expected)
continue
assert tm.equalContents(result, answer)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.symmetric_difference([1, 2, 3])
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_union(self, index, fname, sname, expected_name):
# GH#9943, GH#9862
# Test unions with various name combinations
# Do not test MultiIndex or repeats
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.union(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test copy.union(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_union_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.union(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
union = first.union(second).sort_values()
expected = index.set_names(expected_name).sort_values()
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_intersect(self, index, fname, sname, expected_name):
# GH#35847
# Test intersections with various name combinations
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.intersection(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test copy.intersection(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_intersect_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.intersection(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
intersect = first.intersection(second).sort_values()
expected = index[1:].set_names(expected_name).sort_values()
tm.assert_index_equal(intersect, expected)
def test_intersection_name_retention_with_nameless(self, index):
if isinstance(index, MultiIndex):
index = index.rename(list(range(index.nlevels)))
else:
index = index.rename("foo")
other = np.asarray(index)
result = index.intersection(other)
assert result.name == index.name
# empty other, same dtype
result = index.intersection(other[:0])
assert result.name == index.name
# empty `self`
result = index[:0].intersection(other)
assert result.name == index.name
def test_difference_preserves_type_empty(self, index, sort):
# GH#20040
# If taking difference of a set and itself, it
# needs to preserve the type of the index
if not index.is_unique:
return
result = index.difference(index, sort=sort)
expected = index[:0]
tm.assert_index_equal(result, expected, exact=True)
def test_difference_name_retention_equals(self, index, sort, names):
if isinstance(index, MultiIndex):
names = [[x] * index.nlevels for x in names]
index = index.rename(names[0])
other = index.rename(names[1])
assert index.equals(other)
result = index.difference(other)
expected = index[:0].rename(names[2])
tm.assert_index_equal(result, expected)
def test_intersection_difference_match_empty(self, index, sort):
# GH#20040
# Test that the intersection of an index with an
# empty index produces the same index as the difference
# of an index with itself. Test for all types
if not index.is_unique:
return
inter = index.intersection(index[:0])
diff = index.difference(index, sort=sort)
tm.assert_index_equal(inter, diff, exact=True)
@pytest.mark.parametrize(
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_setop_with_categorical(index, sort, method):
if isinstance(index, MultiIndex):
# tested separately in tests.indexes.multi.test_setops
return
other = index.astype("category")
result = getattr(index, method)(other, sort=sort)
expected = getattr(index, method)(index, sort=sort)
tm.assert_index_equal(result, expected)
result = getattr(index, method)(other[:5], sort=sort)
expected = getattr(index, method)(index[:5], sort=sort)
tm.assert_index_equal(result, expected)
def test_intersection_duplicates_all_indexes(index):
# GH#38743
if index.empty:
# No duplicates in empty indexes
return
def check_intersection_commutative(left, right):
assert left.intersection(right).equals(right.intersection(left))
idx = index
idx_non_unique = idx[[0, 0, 1, 2]]
check_intersection_commutative(idx, idx_non_unique)
assert idx.intersection(idx_non_unique).is_unique
|
bsd-3-clause
|
citrix-openstack-build/nova
|
nova/cmd/objectstore.py
|
17
|
1184
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Daemon for nova objectstore. Supports S3 API."""
import sys
from nova import config
from nova.objectstore import s3server
from nova.openstack.common import log as logging
from nova import service
from nova import utils
def main():
config.parse_args(sys.argv)
logging.setup("nova")
utils.monkey_patch()
server = s3server.get_wsgi_server()
service.serve(server)
service.wait()
|
apache-2.0
|
paurosello/frappe
|
frappe/website/doctype/help_article/help_article.py
|
12
|
2994
|
# Copyright (c) 2013, Frappe and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.website.website_generator import WebsiteGenerator
from frappe.utils import is_markdown, markdown
from frappe.website.utils import get_comment_list
from frappe import _
class HelpArticle(WebsiteGenerator):
def validate(self):
self.set_route()
def set_route(self):
'''Set route from category and title if missing'''
if not self.route:
self.route = '/'.join([frappe.get_value('Help Category', self.category, 'route'),
self.scrub(self.title)])
def on_update(self):
self.update_category()
clear_cache()
def update_category(self):
cnt = frappe.db.sql("""select count(*) from `tabHelp Article`
where category=%s and ifnull(published,0)=1""", self.category)[0][0]
cat = frappe.get_doc("Help Category", self.category)
cat.help_articles = cnt
cat.save()
def get_context(self, context):
if is_markdown(context.content):
context.content = markdown(context.content)
context.login_required = True
context.category = frappe.get_doc('Help Category', self.category)
context.level_class = get_level_class(self.level)
context.comment_list = get_comment_list(self.doctype, self.name)
context.show_sidebar = True
context.sidebar_items = get_sidebar_items()
context.parents = self.get_parents(context)
def get_parents(self, context):
return [{"title": context.category.category_name, "route":context.category.route}]
def get_list_context(context=None):
filters = dict(published=1)
category = frappe.db.get_value("Help Category", { "route": frappe.local.path })
if category:
filters['category'] = category
list_context = frappe._dict(
title = category or _("Knowledge Base"),
get_level_class = get_level_class,
show_sidebar = True,
sidebar_items = get_sidebar_items(),
hide_filters = True,
filters = filters,
category = frappe.local.form_dict.category,
no_breadcrumbs = True
)
if frappe.local.form_dict.txt:
list_context.blog_subtitle = _('Filtered by "{0}"').format(frappe.local.form_dict.txt)
#
# list_context.update(frappe.get_doc("Blog Settings", "Blog Settings").as_dict())
return list_context
def get_level_class(level):
return {
"Beginner": "green",
"Intermediate": "orange",
"Expert": "red"
}[level]
def get_sidebar_items():
def _get():
return frappe.db.sql("""select
concat(category_name, " (", help_articles, ")") as title,
concat('/', route) as route
from
`tabHelp Category`
where
ifnull(published,0)=1 and help_articles > 0
order by
help_articles desc""", as_dict=True)
return frappe.cache().get_value("knowledge_base:category_sidebar", _get)
def clear_cache():
clear_website_cache()
from frappe.website.render import clear_cache
clear_cache()
def clear_website_cache(path=None):
frappe.cache().delete_value("knowledge_base:category_sidebar")
frappe.cache().delete_value("knowledge_base:faq")
|
mit
|
cyrusin/tornado
|
demos/benchmark/gen_benchmark.py
|
99
|
1189
|
#!/usr/bin/env python
#
# A simple benchmark of the tornado.gen module.
# Runs in two modes, testing new-style (@coroutine and Futures)
# and old-style (@engine and Tasks) coroutines.
from timeit import Timer
from tornado import gen
from tornado.options import options, define, parse_command_line
define('num', default=10000, help='number of iterations')
# These benchmarks are delicate. They hit various fast-paths in the gen
# machinery in order to stay synchronous so we don't need an IOLoop.
# This removes noise from the results, but it's easy to change things
# in a way that completely invalidates the results.
@gen.engine
def e2(callback):
callback()
@gen.engine
def e1():
for i in range(10):
yield gen.Task(e2)
@gen.coroutine
def c2():
pass
@gen.coroutine
def c1():
for i in range(10):
yield c2()
def main():
parse_command_line()
t = Timer(e1)
results = t.timeit(options.num) / options.num
print('engine: %0.3f ms per iteration' % (results * 1000))
t = Timer(c1)
results = t.timeit(options.num) / options.num
print('coroutine: %0.3f ms per iteration' % (results * 1000))
if __name__ == '__main__':
main()
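# Illustrative usage (not part of the original benchmark): the iteration
# count is controlled by the --num option defined above, e.g.
#
#     python gen_benchmark.py --num=20000
#
# Both timings are reported in milliseconds per call of e1()/c1().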
|
apache-2.0
|
joone/chromium-crosswalk
|
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/mux.py
|
636
|
71218
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides classes and helper functions for multiplexing extension.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06
"""
import collections
import copy
import email
import email.parser
import logging
import math
import struct
import threading
import traceback
from mod_pywebsocket import common
from mod_pywebsocket import handshake
from mod_pywebsocket import util
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_hybi import Frame
from mod_pywebsocket._stream_hybi import Stream
from mod_pywebsocket._stream_hybi import StreamOptions
from mod_pywebsocket._stream_hybi import create_binary_frame
from mod_pywebsocket._stream_hybi import create_closing_handshake_body
from mod_pywebsocket._stream_hybi import create_header
from mod_pywebsocket._stream_hybi import create_length_header
from mod_pywebsocket._stream_hybi import parse_frame
from mod_pywebsocket.handshake import hybi
_CONTROL_CHANNEL_ID = 0
_DEFAULT_CHANNEL_ID = 1
_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
_MUX_OPCODE_FLOW_CONTROL = 2
_MUX_OPCODE_DROP_CHANNEL = 3
_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
_MAX_CHANNEL_ID = 2 ** 29 - 1
_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64
_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024
_HANDSHAKE_ENCODING_IDENTITY = 0
_HANDSHAKE_ENCODING_DELTA = 1
# We need only these status code for now.
_HTTP_BAD_RESPONSE_MESSAGES = {
common.HTTP_STATUS_BAD_REQUEST: 'Bad Request',
}
# DropChannel reason code
# TODO(bashi): Define all reason code defined in -05 draft.
_DROP_CODE_NORMAL_CLOSURE = 1000
_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001
_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003
_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 2010
_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
_DROP_CODE_SEND_QUOTA_OVERFLOW = 3006
_DROP_CODE_ACKNOWLEDGED = 3008
_DROP_CODE_BAD_FRAGMENTATION = 3009
class MuxUnexpectedException(Exception):
"""Exception in handling multiplexing extension."""
pass
# Temporary
class MuxNotImplementedException(Exception):
"""Raised when a flow enters unimplemented code path."""
pass
class LogicalConnectionClosedException(Exception):
"""Raised when logical connection is gracefully closed."""
pass
class PhysicalConnectionError(Exception):
"""Raised when there is a physical connection error."""
def __init__(self, drop_code, message=''):
super(PhysicalConnectionError, self).__init__(
'code=%d, message=%r' % (drop_code, message))
self.drop_code = drop_code
self.message = message
class LogicalChannelError(Exception):
"""Raised when there is a logical channel error."""
def __init__(self, channel_id, drop_code, message=''):
super(LogicalChannelError, self).__init__(
'channel_id=%d, code=%d, message=%r' % (
channel_id, drop_code, message))
self.channel_id = channel_id
self.drop_code = drop_code
self.message = message
def _encode_channel_id(channel_id):
if channel_id < 0:
raise ValueError('Channel id %d must not be negative' % channel_id)
if channel_id < 2 ** 7:
return chr(channel_id)
if channel_id < 2 ** 14:
return struct.pack('!H', 0x8000 + channel_id)
if channel_id < 2 ** 21:
first = chr(0xc0 + (channel_id >> 16))
return first + struct.pack('!H', channel_id & 0xffff)
if channel_id < 2 ** 29:
return struct.pack('!L', 0xe0000000 + channel_id)
raise ValueError('Channel id %d is too large' % channel_id)
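# A minimal sketch (added comment, derived from the branches above) of how
# channel ids map to encoded lengths in _encode_channel_id:
#   channel_id < 2**7   -> 1 byte
#   channel_id < 2**14  -> 2 bytes (leading bits 10)
#   channel_id < 2**21  -> 3 bytes (leading bits 110)
#   channel_id < 2**29  -> 4 bytes (leading bits 111)
# Anything outside [0, 2**29 - 1] raises ValueError.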
def _encode_number(number):
return create_length_header(number, False)
def _create_add_channel_response(channel_id, encoded_handshake,
encoding=0, rejected=False):
if encoding != 0 and encoding != 1:
raise ValueError('Invalid encoding %d' % encoding)
first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) |
(rejected << 4) | encoding)
block = (chr(first_byte) +
_encode_channel_id(channel_id) +
_encode_number(len(encoded_handshake)) +
encoded_handshake)
return block
def _create_drop_channel(channel_id, code=None, message=''):
if len(message) > 0 and code is None:
raise ValueError('Code must be specified if message is specified')
first_byte = _MUX_OPCODE_DROP_CHANNEL << 5
block = chr(first_byte) + _encode_channel_id(channel_id)
if code is None:
block += _encode_number(0) # Reason size
else:
reason = struct.pack('!H', code) + message
reason_size = _encode_number(len(reason))
block += reason_size + reason
return block
def _create_flow_control(channel_id, replenished_quota):
first_byte = _MUX_OPCODE_FLOW_CONTROL << 5
block = (chr(first_byte) +
_encode_channel_id(channel_id) +
_encode_number(replenished_quota))
return block
def _create_new_channel_slot(slots, send_quota):
if slots < 0 or send_quota < 0:
raise ValueError('slots and send_quota must be non-negative.')
first_byte = _MUX_OPCODE_NEW_CHANNEL_SLOT << 5
block = (chr(first_byte) +
_encode_number(slots) +
_encode_number(send_quota))
return block
def _create_fallback_new_channel_slot():
first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1 # Set the F flag
block = (chr(first_byte) + _encode_number(0) + _encode_number(0))
return block
def _parse_request_text(request_text):
request_line, header_lines = request_text.split('\r\n', 1)
words = request_line.split(' ')
if len(words) != 3:
raise ValueError('Bad Request-Line syntax %r' % request_line)
[command, path, version] = words
if version != 'HTTP/1.1':
raise ValueError('Bad request version %r' % version)
# email.parser.Parser() parses RFC 2822 (RFC 822) style headers.
# RFC 6455 refers RFC 2616 for handshake parsing, and RFC 2616 refers
# RFC 822.
headers = email.parser.Parser().parsestr(header_lines)
return command, path, version, headers
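# Illustrative example (assumed input, not from the original source): for a
# request_text of
#   'GET /chat HTTP/1.1\r\nHost: example.com\r\n\r\n'
# _parse_request_text returns ('GET', '/chat', 'HTTP/1.1', headers) where
# headers is an email Message object with headers['Host'] == 'example.com'.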
class _ControlBlock(object):
"""A structure that holds parsing result of multiplexing control block.
Control block specific attributes will be added by _MuxFramePayloadParser.
(e.g. encoded_handshake will be added for AddChannelRequest and
AddChannelResponse)
"""
def __init__(self, opcode):
self.opcode = opcode
class _MuxFramePayloadParser(object):
"""A class that parses multiplexed frame payload."""
def __init__(self, payload):
self._data = payload
self._read_position = 0
self._logger = util.get_class_logger(self)
def read_channel_id(self):
"""Reads channel id.
Raises:
ValueError: when the payload doesn't contain
valid channel id.
"""
remaining_length = len(self._data) - self._read_position
pos = self._read_position
if remaining_length == 0:
raise ValueError('Invalid channel id format')
channel_id = ord(self._data[pos])
channel_id_length = 1
if channel_id & 0xe0 == 0xe0:
if remaining_length < 4:
raise ValueError('Invalid channel id format')
channel_id = struct.unpack('!L',
self._data[pos:pos+4])[0] & 0x1fffffff
channel_id_length = 4
elif channel_id & 0xc0 == 0xc0:
if remaining_length < 3:
raise ValueError('Invalid channel id format')
channel_id = (((channel_id & 0x1f) << 16) +
struct.unpack('!H', self._data[pos+1:pos+3])[0])
channel_id_length = 3
elif channel_id & 0x80 == 0x80:
if remaining_length < 2:
raise ValueError('Invalid channel id format')
channel_id = struct.unpack('!H',
self._data[pos:pos+2])[0] & 0x3fff
channel_id_length = 2
self._read_position += channel_id_length
return channel_id
def read_inner_frame(self):
"""Reads an inner frame.
Raises:
PhysicalConnectionError: when the inner frame is invalid.
"""
if len(self._data) == self._read_position:
raise PhysicalConnectionError(
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED)
bits = ord(self._data[self._read_position])
self._read_position += 1
fin = (bits & 0x80) == 0x80
rsv1 = (bits & 0x40) == 0x40
rsv2 = (bits & 0x20) == 0x20
rsv3 = (bits & 0x10) == 0x10
opcode = bits & 0xf
payload = self.remaining_data()
# Consume rest of the message which is payload data of the original
# frame.
self._read_position = len(self._data)
return fin, rsv1, rsv2, rsv3, opcode, payload
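# For reference (added comment): the single leading byte read above packs the
# inner frame flags as bit 7: FIN, bit 6: RSV1, bit 5: RSV2, bit 4: RSV3 and
# bits 3-0: opcode; everything that follows is the inner frame's payload.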
def _read_number(self):
if self._read_position + 1 > len(self._data):
raise ValueError(
'Cannot read the first byte of number field')
number = ord(self._data[self._read_position])
if number & 0x80 == 0x80:
raise ValueError(
'The most significant bit of the first byte of number should '
'be unset')
self._read_position += 1
pos = self._read_position
if number == 127:
if pos + 8 > len(self._data):
raise ValueError('Invalid number field')
self._read_position += 8
number = struct.unpack('!Q', self._data[pos:pos+8])[0]
if number > 0x7FFFFFFFFFFFFFFF:
raise ValueError('Encoded number(%d) >= 2^63' % number)
if number <= 0xFFFF:
raise ValueError(
'%d should not be encoded by 9 bytes encoding' % number)
return number
if number == 126:
if pos + 2 > len(self._data):
raise ValueError('Invalid number field')
self._read_position += 2
number = struct.unpack('!H', self._data[pos:pos+2])[0]
if number <= 125:
raise ValueError(
'%d should not be encoded by 3 bytes encoding' % number)
return number
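# Note (added comment, mirroring the branches above): the number field uses a
# WebSocket-style length encoding with the MSB of the first byte reserved:
#   0-125                 -> single byte
#   126 .. 0xFFFF         -> byte 126 followed by a 2-byte big-endian value
#   0x10000 .. 2**63 - 1  -> byte 127 followed by an 8-byte big-endian value
# Values encoded in a longer form than necessary are rejected with ValueError.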
def _read_size_and_contents(self):
"""Reads data that consists of followings:
- the size of the contents encoded the same way as payload length
of the WebSocket Protocol with 1 bit padding at the head.
- the contents.
"""
try:
size = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
pos = self._read_position
if pos + size > len(self._data):
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Cannot read %d bytes data' % size)
self._read_position += size
return self._data[pos:pos+size]
def _read_add_channel_request(self, first_byte, control_block):
reserved = (first_byte >> 2) & 0x7
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
# Invalid encoding will be handled by MuxHandler.
encoding = first_byte & 0x3
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
control_block.encoding = encoding
encoded_handshake = self._read_size_and_contents()
control_block.encoded_handshake = encoded_handshake
return control_block
def _read_add_channel_response(self, first_byte, control_block):
reserved = (first_byte >> 2) & 0x3
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
control_block.accepted = (first_byte >> 4) & 1
control_block.encoding = first_byte & 0x3
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
control_block.encoded_handshake = self._read_size_and_contents()
return control_block
def _read_flow_control(self, first_byte, control_block):
reserved = first_byte & 0x1f
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
try:
control_block.channel_id = self.read_channel_id()
control_block.send_quota = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
return control_block
def _read_drop_channel(self, first_byte, control_block):
reserved = first_byte & 0x1f
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
reason = self._read_size_and_contents()
if len(reason) == 0:
control_block.drop_code = None
control_block.drop_message = ''
elif len(reason) >= 2:
control_block.drop_code = struct.unpack('!H', reason[:2])[0]
control_block.drop_message = reason[2:]
else:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received DropChannel that contains only 1-byte reason')
return control_block
def _read_new_channel_slot(self, first_byte, control_block):
reserved = first_byte & 0x1e
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
control_block.fallback = first_byte & 1
try:
control_block.slots = self._read_number()
control_block.send_quota = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
return control_block
def read_control_blocks(self):
"""Reads control block(s).
Raises:
PhysicalConnectionError: when the payload contains invalid control
block(s).
StopIteration: when no control blocks left.
"""
while self._read_position < len(self._data):
first_byte = ord(self._data[self._read_position])
self._read_position += 1
opcode = (first_byte >> 5) & 0x7
control_block = _ControlBlock(opcode=opcode)
if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
yield self._read_add_channel_request(first_byte, control_block)
elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
yield self._read_add_channel_response(
first_byte, control_block)
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
yield self._read_flow_control(first_byte, control_block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
yield self._read_drop_channel(first_byte, control_block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
yield self._read_new_channel_slot(first_byte, control_block)
else:
raise PhysicalConnectionError(
_DROP_CODE_UNKNOWN_MUX_OPCODE,
'Invalid opcode %d' % opcode)
assert self._read_position == len(self._data)
raise StopIteration
def remaining_data(self):
"""Returns remaining data."""
return self._data[self._read_position:]
class _LogicalRequest(object):
"""Mimics mod_python request."""
def __init__(self, channel_id, command, path, protocol, headers,
connection):
"""Constructs an instance.
Args:
channel_id: the channel id of the logical channel.
command: HTTP request command.
path: HTTP request path.
protocol: HTTP protocol version (e.g. 'HTTP/1.1').
headers: HTTP headers.
connection: _LogicalConnection instance.
"""
self.channel_id = channel_id
self.method = command
self.uri = path
self.protocol = protocol
self.headers_in = headers
self.connection = connection
self.server_terminated = False
self.client_terminated = False
def is_https(self):
"""Mimics request.is_https(). Returns False because this method is
used only by old protocols (hixie and hybi00).
"""
return False
class _LogicalConnection(object):
"""Mimics mod_python mp_conn."""
# For details, see the comment of set_read_state().
STATE_ACTIVE = 1
STATE_GRACEFULLY_CLOSED = 2
STATE_TERMINATED = 3
def __init__(self, mux_handler, channel_id):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
channel_id: channel id of this connection.
"""
self._mux_handler = mux_handler
self._channel_id = channel_id
self._incoming_data = ''
# - Protects _waiting_write_completion
# - Signals the thread waiting for completion of write by mux handler
self._write_condition = threading.Condition()
self._waiting_write_completion = False
self._read_condition = threading.Condition()
self._read_state = self.STATE_ACTIVE
def get_local_addr(self):
"""Getter to mimic mp_conn.local_addr."""
return self._mux_handler.physical_connection.get_local_addr()
local_addr = property(get_local_addr)
def get_remote_addr(self):
"""Getter to mimic mp_conn.remote_addr."""
return self._mux_handler.physical_connection.get_remote_addr()
remote_addr = property(get_remote_addr)
def get_memorized_lines(self):
"""Gets memorized lines. Not supported."""
raise MuxUnexpectedException('_LogicalConnection does not support '
'get_memorized_lines')
def write(self, data):
"""Writes data. mux_handler sends data asynchronously. The caller will
be suspended until the write is done.
Args:
data: data to be written.
Raises:
MuxUnexpectedException: when called before finishing the previous
write.
"""
try:
self._write_condition.acquire()
if self._waiting_write_completion:
raise MuxUnexpectedException(
'Logical connection %d is already waiting the completion '
'of write' % self._channel_id)
self._waiting_write_completion = True
self._mux_handler.send_data(self._channel_id, data)
self._write_condition.wait()
# TODO(tyoshino): Raise an exception if woke up by on_writer_done.
finally:
self._write_condition.release()
def write_control_data(self, data):
"""Writes data via the control channel. Don't wait finishing write
because this method can be called by mux dispatcher.
Args:
data: data to be written.
"""
self._mux_handler.send_control_data(data)
def on_write_data_done(self):
"""Called when sending data is completed."""
try:
self._write_condition.acquire()
if not self._waiting_write_completion:
raise MuxUnexpectedException(
'Invalid call of on_write_data_done for logical '
'connection %d' % self._channel_id)
self._waiting_write_completion = False
self._write_condition.notify()
finally:
self._write_condition.release()
def on_writer_done(self):
"""Called by the mux handler when the writer thread has finished."""
try:
self._write_condition.acquire()
self._waiting_write_completion = False
self._write_condition.notify()
finally:
self._write_condition.release()
def append_frame_data(self, frame_data):
"""Appends incoming frame data. Called when mux_handler dispatches
frame data to the corresponding application.
Args:
frame_data: incoming frame data.
"""
self._read_condition.acquire()
self._incoming_data += frame_data
self._read_condition.notify()
self._read_condition.release()
def read(self, length):
"""Reads data. Blocks until enough data has arrived via physical
connection.
Args:
length: length of data to be read.
Raises:
LogicalConnectionClosedException: when closing handshake for this
logical channel has been received.
ConnectionTerminatedException: when the physical connection has
closed, or an error is caused on the reader thread.
"""
self._read_condition.acquire()
while (self._read_state == self.STATE_ACTIVE and
len(self._incoming_data) < length):
self._read_condition.wait()
try:
if self._read_state == self.STATE_GRACEFULLY_CLOSED:
raise LogicalConnectionClosedException(
'Logical channel %d has closed.' % self._channel_id)
elif self._read_state == self.STATE_TERMINATED:
raise ConnectionTerminatedException(
'Receiving %d bytes failed. Logical channel (%d) closed' %
(length, self._channel_id))
value = self._incoming_data[:length]
self._incoming_data = self._incoming_data[length:]
finally:
self._read_condition.release()
return value
def set_read_state(self, new_state):
"""Sets the state of this connection. Called when an event for this
connection has occurred.
Args:
new_state: state to be set. new_state must be one of the following:
- STATE_GRACEFULLY_CLOSED: when closing handshake for this
connection has been received.
- STATE_TERMINATED: when the physical connection has closed or
DropChannel of this connection has received.
"""
self._read_condition.acquire()
self._read_state = new_state
self._read_condition.notify()
self._read_condition.release()
class _InnerMessage(object):
"""Holds the result of _InnerMessageBuilder.build().
"""
def __init__(self, opcode, payload):
self.opcode = opcode
self.payload = payload
class _InnerMessageBuilder(object):
"""A class that holds the context of inner message fragmentation and
builds a message from fragmented inner frame(s).
"""
def __init__(self):
self._control_opcode = None
self._pending_control_fragments = []
self._message_opcode = None
self._pending_message_fragments = []
self._frame_handler = self._handle_first
def _handle_first(self, frame):
if frame.opcode == common.OPCODE_CONTINUATION:
raise InvalidFrameException('Sending invalid continuation opcode')
if common.is_control_opcode(frame.opcode):
return self._process_first_fragmented_control(frame)
else:
return self._process_first_fragmented_message(frame)
def _process_first_fragmented_control(self, frame):
self._control_opcode = frame.opcode
self._pending_control_fragments.append(frame.payload)
if not frame.fin:
self._frame_handler = self._handle_fragmented_control
return None
return self._reassemble_fragmented_control()
def _process_first_fragmented_message(self, frame):
self._message_opcode = frame.opcode
self._pending_message_fragments.append(frame.payload)
if not frame.fin:
self._frame_handler = self._handle_fragmented_message
return None
return self._reassemble_fragmented_message()
def _handle_fragmented_control(self, frame):
if frame.opcode != common.OPCODE_CONTINUATION:
raise InvalidFrameException(
'Sending invalid opcode %d while sending fragmented control '
'message' % frame.opcode)
self._pending_control_fragments.append(frame.payload)
if not frame.fin:
return None
return self._reassemble_fragmented_control()
def _reassemble_fragmented_control(self):
opcode = self._control_opcode
payload = ''.join(self._pending_control_fragments)
self._control_opcode = None
self._pending_control_fragments = []
if self._message_opcode is not None:
self._frame_handler = self._handle_fragmented_message
else:
self._frame_handler = self._handle_first
return _InnerMessage(opcode, payload)
def _handle_fragmented_message(self, frame):
# Sender can interleave a control message while sending fragmented
# messages.
if common.is_control_opcode(frame.opcode):
if self._control_opcode is not None:
raise MuxUnexpectedException(
'Should not reach here(Bug in builder)')
return self._process_first_fragmented_control(frame)
if frame.opcode != common.OPCODE_CONTINUATION:
raise InvalidFrameException(
'Sending invalid opcode %d while sending fragmented message' %
frame.opcode)
self._pending_message_fragments.append(frame.payload)
if not frame.fin:
return None
return self._reassemble_fragmented_message()
def _reassemble_fragmented_message(self):
opcode = self._message_opcode
payload = ''.join(self._pending_message_fragments)
self._message_opcode = None
self._pending_message_fragments = []
self._frame_handler = self._handle_first
return _InnerMessage(opcode, payload)
def build(self, frame):
"""Build an inner message. Returns an _InnerMessage instance when
the given frame is the last fragmented frame. Returns None otherwise.
Args:
frame: an inner frame.
Raises:
InvalidFrameException: when received invalid opcode. (e.g.
receiving non continuation data opcode but the fin flag of
the previous inner frame was not set.)
"""
return self._frame_handler(frame)
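# A small worked example (illustrative, not from the original source): given
# inner frames TEXT(fin=0), CONTINUATION(fin=0), CONTINUATION(fin=1), build()
# returns None for the first two calls and an _InnerMessage with opcode
# OPCODE_TEXT and the concatenated payloads for the third. A control frame
# such as PING may be interleaved between those calls; it is returned as its
# own _InnerMessage without disturbing the pending text fragments.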
class _LogicalStream(Stream):
"""Mimics the Stream class. This class interprets multiplexed WebSocket
frames.
"""
def __init__(self, request, stream_options, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
stream_options: StreamOptions instance.
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
# Physical stream is responsible for masking.
stream_options.unmask_receive = False
Stream.__init__(self, request, stream_options)
self._send_closed = False
self._send_quota = send_quota
# - Protects _send_closed and _send_quota
# - Signals the thread waiting for send quota replenished
self._send_condition = threading.Condition()
# The opcode of the first frame in messages.
self._message_opcode = common.OPCODE_TEXT
# True when the last message was fragmented.
self._last_message_was_fragmented = False
self._receive_quota = receive_quota
self._write_inner_frame_semaphore = threading.Semaphore()
self._inner_message_builder = _InnerMessageBuilder()
def _create_inner_frame(self, opcode, payload, end=True):
frame = Frame(fin=end, opcode=opcode, payload=payload)
for frame_filter in self._options.outgoing_frame_filters:
frame_filter.filter(frame)
if len(payload) != len(frame.payload):
raise MuxUnexpectedException(
'Mux extension must not be used after extensions which change '
'frame boundary')
first_byte = ((frame.fin << 7) | (frame.rsv1 << 6) |
(frame.rsv2 << 5) | (frame.rsv3 << 4) | frame.opcode)
return chr(first_byte) + frame.payload
def _write_inner_frame(self, opcode, payload, end=True):
payload_length = len(payload)
write_position = 0
try:
# An inner frame will be fragmented if there is not enough send
# quota. This semaphore ensures that fragmented inner frames are
# sent in order on the logical channel.
# Note that frames that come from other logical channels or
# multiplexing control blocks can be inserted between fragmented
# inner frames on the physical channel.
self._write_inner_frame_semaphore.acquire()
# Consume an octet quota when this is the first fragmented frame.
if opcode != common.OPCODE_CONTINUATION:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
self._request.channel_id)
self._send_quota -= 1
finally:
self._send_condition.release()
while write_position < payload_length:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._logger.debug(
'No quota. Waiting FlowControl message for %d.' %
self._request.channel_id)
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
self._request.channel_id)
remaining = payload_length - write_position
write_length = min(self._send_quota, remaining)
inner_frame_end = (
end and
(write_position + write_length == payload_length))
inner_frame = self._create_inner_frame(
opcode,
payload[write_position:write_position+write_length],
inner_frame_end)
self._send_quota -= write_length
self._logger.debug('Consumed quota=%d, remaining=%d' %
(write_length, self._send_quota))
finally:
self._send_condition.release()
# Writing data will block the worker so we need to release
# _send_condition before writing.
self._logger.debug('Sending inner frame: %r' % inner_frame)
self._request.connection.write(inner_frame)
write_position += write_length
opcode = common.OPCODE_CONTINUATION
except ValueError, e:
raise BadOperationException(e)
finally:
self._write_inner_frame_semaphore.release()
def replenish_send_quota(self, send_quota):
"""Replenish send quota."""
try:
self._send_condition.acquire()
if self._send_quota + send_quota > 0x7FFFFFFFFFFFFFFF:
self._send_quota = 0
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_SEND_QUOTA_OVERFLOW)
self._send_quota += send_quota
self._logger.debug('Replenished send quota for channel id %d: %d' %
(self._request.channel_id, self._send_quota))
finally:
self._send_condition.notify()
self._send_condition.release()
def consume_receive_quota(self, amount):
"""Consumes receive quota. Returns False on failure."""
if self._receive_quota < amount:
self._logger.debug('Violate quota on channel id %d: %d < %d' %
(self._request.channel_id,
self._receive_quota, amount))
return False
self._receive_quota -= amount
return True
def send_message(self, message, end=True, binary=False):
"""Override Stream.send_message."""
if self._request.server_terminated:
raise BadOperationException(
'Requested send_message after sending out a closing handshake')
if binary and isinstance(message, unicode):
raise BadOperationException(
'Message for binary frame must be instance of str')
if binary:
opcode = common.OPCODE_BINARY
else:
opcode = common.OPCODE_TEXT
message = message.encode('utf-8')
for message_filter in self._options.outgoing_message_filters:
message = message_filter.filter(message, end, binary)
if self._last_message_was_fragmented:
if opcode != self._message_opcode:
raise BadOperationException('Message types are different in '
'frames for the same message')
opcode = common.OPCODE_CONTINUATION
else:
self._message_opcode = opcode
self._write_inner_frame(opcode, message, end)
self._last_message_was_fragmented = not end
def _receive_frame(self):
"""Overrides Stream._receive_frame.
In addition to calling Stream._receive_frame, this method adds the amount
of payload to the receive quota and sends FlowControl to the client.
We need to do it here because Stream.receive_message() handles
control frames internally.
"""
opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
amount = len(payload)
# Replenish one extra octet when receiving the first fragmented frame.
if opcode != common.OPCODE_CONTINUATION:
amount += 1
self._receive_quota += amount
frame_data = _create_flow_control(self._request.channel_id,
amount)
self._logger.debug('Sending flow control for %d, replenished=%d' %
(self._request.channel_id, amount))
self._request.connection.write_control_data(frame_data)
return opcode, payload, fin, rsv1, rsv2, rsv3
def _get_message_from_frame(self, frame):
"""Overrides Stream._get_message_from_frame.
"""
try:
inner_message = self._inner_message_builder.build(frame)
except InvalidFrameException:
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_BAD_FRAGMENTATION)
if inner_message is None:
return None
self._original_opcode = inner_message.opcode
return inner_message.payload
def receive_message(self):
"""Overrides Stream.receive_message."""
# Just call Stream.receive_message(), but catch
# LogicalConnectionClosedException, which is raised when the logical
# connection has closed gracefully.
try:
return Stream.receive_message(self)
except LogicalConnectionClosedException, e:
self._logger.debug('%s', e)
return None
def _send_closing_handshake(self, code, reason):
"""Overrides Stream._send_closing_handshake."""
body = create_closing_handshake_body(code, reason)
self._logger.debug('Sending closing handshake for %d: (%r, %r)' %
(self._request.channel_id, code, reason))
self._write_inner_frame(common.OPCODE_CLOSE, body, end=True)
self._request.server_terminated = True
def send_ping(self, body=''):
"""Overrides Stream.send_ping"""
self._logger.debug('Sending ping on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PING, body, end=True)
self._ping_queue.append(body)
def _send_pong(self, body):
"""Overrides Stream._send_pong"""
self._logger.debug('Sending pong on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PONG, body, end=True)
def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
"""Overrides Stream.close_connection."""
# TODO(bashi): Implement
self._logger.debug('Closing logical connection %d' %
self._request.channel_id)
self._request.server_terminated = True
def stop_sending(self):
"""Stops accepting new send operation (_write_inner_frame)."""
self._send_condition.acquire()
self._send_closed = True
self._send_condition.notify()
self._send_condition.release()
class _OutgoingData(object):
"""A structure that holds data to be sent via physical connection and
origin of the data.
"""
def __init__(self, channel_id, data):
self.channel_id = channel_id
self.data = data
class _PhysicalConnectionWriter(threading.Thread):
"""A thread that is responsible for writing data to physical connection.
TODO(bashi): Make sure there is no thread-safety problem when the reader
thread reads data from the same socket at a time.
"""
def __init__(self, mux_handler):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
# When set, make this thread stop accepting new data, flush pending
# data and exit.
self._stop_requested = False
# The close code of the physical connection.
self._close_code = common.STATUS_NORMAL_CLOSURE
# Deque for passing write data. It's protected by _deque_condition
# until _stop_requested is set.
self._deque = collections.deque()
# - Protects _deque, _stop_requested and _close_code
# - Signals threads waiting for them to be available
self._deque_condition = threading.Condition()
def put_outgoing_data(self, data):
"""Puts outgoing data.
Args:
data: _OutgoingData instance.
Raises:
BadOperationException: when the thread has been requested to
terminate.
"""
try:
self._deque_condition.acquire()
if self._stop_requested:
raise BadOperationException('Cannot write data anymore')
self._deque.append(data)
self._deque_condition.notify()
finally:
self._deque_condition.release()
def _write_data(self, outgoing_data):
message = (_encode_channel_id(outgoing_data.channel_id) +
outgoing_data.data)
try:
self._mux_handler.physical_stream.send_message(
message=message, end=True, binary=True)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
(self._mux_handler.physical_connection.remote_addr,), e)
raise
# TODO(bashi): It would be better to block the thread that sends
# control data as well.
if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
self._mux_handler.notify_write_data_done(outgoing_data.channel_id)
def run(self):
try:
self._deque_condition.acquire()
while not self._stop_requested:
if len(self._deque) == 0:
self._deque_condition.wait()
continue
outgoing_data = self._deque.popleft()
self._deque_condition.release()
self._write_data(outgoing_data)
self._deque_condition.acquire()
# Flush deque.
#
# At this point, self._deque_condition is always acquired.
try:
while len(self._deque) > 0:
outgoing_data = self._deque.popleft()
self._write_data(outgoing_data)
finally:
self._deque_condition.release()
# Close physical connection.
try:
# Don't wait for the response here. The response will be read
# by the reader thread.
self._mux_handler.physical_stream.close_connection(
self._close_code, wait_response=False)
except Exception, e:
util.prepend_message_to_exception(
'Failed to close the physical connection: %r' % e)
raise
finally:
self._mux_handler.notify_writer_done()
def stop(self, close_code=common.STATUS_NORMAL_CLOSURE):
"""Stops the writer thread."""
self._deque_condition.acquire()
self._stop_requested = True
self._close_code = close_code
self._deque_condition.notify()
self._deque_condition.release()
class _PhysicalConnectionReader(threading.Thread):
"""A thread that is responsible for reading data from physical connection.
"""
def __init__(self, mux_handler):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
def run(self):
while True:
try:
physical_stream = self._mux_handler.physical_stream
message = physical_stream.receive_message()
if message is None:
break
# Below happens only when a data message is received.
opcode = physical_stream.get_last_received_opcode()
if opcode != common.OPCODE_BINARY:
self._mux_handler.fail_physical_connection(
_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
'Received a text message on physical connection')
break
except ConnectionTerminatedException, e:
self._logger.debug('%s', e)
break
try:
self._mux_handler.dispatch_message(message)
except PhysicalConnectionError, e:
self._mux_handler.fail_physical_connection(
e.drop_code, e.message)
break
except LogicalChannelError, e:
self._mux_handler.fail_logical_channel(
e.channel_id, e.drop_code, e.message)
except Exception, e:
self._logger.debug(traceback.format_exc())
break
self._mux_handler.notify_reader_done()
class _Worker(threading.Thread):
"""A thread that is responsible for running the corresponding application
handler.
"""
def __init__(self, mux_handler, request):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
request: _LogicalRequest instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self._request = request
self.setDaemon(True)
def run(self):
self._logger.debug('Logical channel worker started. (id=%d)' %
self._request.channel_id)
try:
# Non-critical exceptions will be handled by dispatcher.
self._mux_handler.dispatcher.transfer_data(self._request)
except LogicalChannelError, e:
self._mux_handler.fail_logical_channel(
e.channel_id, e.drop_code, e.message)
finally:
self._mux_handler.notify_worker_done(self._request.channel_id)
class _MuxHandshaker(hybi.Handshaker):
"""Opening handshake processor for multiplexing."""
_DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ=='
def __init__(self, request, dispatcher, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
dispatcher: Dispatcher instance (dispatch.Dispatcher).
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
hybi.Handshaker.__init__(self, request, dispatcher)
self._send_quota = send_quota
self._receive_quota = receive_quota
# Append headers which should not be included in handshake field of
# AddChannelRequest.
# TODO(bashi): Decide whether we should raise an exception when
# these headers are included already.
request.headers_in[common.UPGRADE_HEADER] = (
common.WEBSOCKET_UPGRADE_TYPE)
request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
str(common.VERSION_HYBI_LATEST))
request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
self._DUMMY_WEBSOCKET_KEY)
def _create_stream(self, stream_options):
"""Override hybi.Handshaker._create_stream."""
self._logger.debug('Creating logical stream for %d' %
self._request.channel_id)
return _LogicalStream(
self._request, stream_options, self._send_quota,
self._receive_quota)
def _create_handshake_response(self, accept):
"""Override hybi._create_handshake_response."""
response = []
response.append('HTTP/1.1 101 Switching Protocols\r\n')
# Upgrade and Sec-WebSocket-Accept should be excluded.
response.append('%s: %s\r\n' % (
common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
if self._request.ws_protocol is not None:
response.append('%s: %s\r\n' % (
common.SEC_WEBSOCKET_PROTOCOL_HEADER,
self._request.ws_protocol))
if (self._request.ws_extensions is not None and
len(self._request.ws_extensions) != 0):
response.append('%s: %s\r\n' % (
common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
common.format_extensions(self._request.ws_extensions)))
response.append('\r\n')
return ''.join(response)
def _send_handshake(self, accept):
"""Override hybi.Handshaker._send_handshake."""
# Don't send handshake response for the default channel
if self._request.channel_id == _DEFAULT_CHANNEL_ID:
return
handshake_response = self._create_handshake_response(accept)
frame_data = _create_add_channel_response(
self._request.channel_id,
handshake_response)
self._logger.debug('Sending handshake response for %d: %r' %
(self._request.channel_id, frame_data))
self._request.connection.write_control_data(frame_data)
class _LogicalChannelData(object):
"""A structure that holds information about logical channel.
"""
def __init__(self, request, worker):
self.request = request
self.worker = worker
self.drop_code = _DROP_CODE_NORMAL_CLOSURE
self.drop_message = ''
class _HandshakeDeltaBase(object):
"""A class that holds information for delta-encoded handshake."""
def __init__(self, headers):
self._headers = headers
def create_headers(self, delta=None):
"""Creates request headers for an AddChannelRequest that has
delta-encoded handshake.
Args:
delta: headers to be overridden.
"""
headers = copy.copy(self._headers)
if delta:
for key, value in delta.items():
# The spec requires that a header with an empty value is
# removed from the delta base.
if len(value) == 0 and headers.has_key(key):
del headers[key]
else:
headers[key] = value
return headers
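# Illustrative sketch (assumed values, not from the original source): with a
# base of {'Host': 'example.com', 'Sec-WebSocket-Protocol': 'chat'} and a
# delta of {'Sec-WebSocket-Protocol': ''}, create_headers() removes the
# protocol header (an empty value in the delta deletes it from the base) and
# keeps 'Host' unchanged; non-empty delta values simply override the base.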
class _MuxHandler(object):
"""Multiplexing handler. When a handler starts, it launches three
threads: the reader thread, the writer thread, and a worker thread.
The reader thread reads data from the physical stream, i.e., the
ws_stream object of the underlying websocket connection. The reader
thread interprets multiplexed frames and dispatches them to logical
channels. Methods of this class are mostly called by the reader thread.
The writer thread sends multiplexed frames which are created by
logical channels via the physical connection.
The worker thread launched at the starting point handles the
"Implicitly Opened Connection". If multiplexing handler receives
an AddChannelRequest and accepts it, the handler will launch a new worker
thread and dispatch the request to it.
"""
def __init__(self, request, dispatcher):
"""Constructs an instance.
Args:
request: mod_python request of the physical connection.
dispatcher: Dispatcher instance (dispatch.Dispatcher).
"""
self.original_request = request
self.dispatcher = dispatcher
self.physical_connection = request.connection
self.physical_stream = request.ws_stream
self._logger = util.get_class_logger(self)
self._logical_channels = {}
self._logical_channels_condition = threading.Condition()
# Holds client's initial quota
self._channel_slots = collections.deque()
self._handshake_base = None
self._worker_done_notify_received = False
self._reader = None
self._writer = None
def start(self):
"""Starts the handler.
Raises:
MuxUnexpectedException: when the handler already started, or when
opening handshake of the default channel fails.
"""
if self._reader or self._writer:
raise MuxUnexpectedException('MuxHandler already started')
self._reader = _PhysicalConnectionReader(self)
self._writer = _PhysicalConnectionWriter(self)
self._reader.start()
self._writer.start()
# Create "Implicitly Opened Connection".
logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
headers = copy.copy(self.original_request.headers_in)
# Add extensions for logical channel.
headers[common.SEC_WEBSOCKET_EXTENSIONS_HEADER] = (
common.format_extensions(
self.original_request.mux_processor.extensions()))
self._handshake_base = _HandshakeDeltaBase(headers)
logical_request = _LogicalRequest(
_DEFAULT_CHANNEL_ID,
self.original_request.method,
self.original_request.uri,
self.original_request.protocol,
self._handshake_base.create_headers(),
logical_connection)
# Client's send quota for the implicitly opened connection is zero,
# but we will send FlowControl later so set the initial quota to
# _INITIAL_QUOTA_FOR_CLIENT.
self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
send_quota = self.original_request.mux_processor.quota()
if not self._do_handshake_for_logical_request(
logical_request, send_quota=send_quota):
raise MuxUnexpectedException(
'Failed handshake on the default channel id')
self._add_logical_channel(logical_request)
# Send FlowControl for the implicitly opened connection.
frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID,
_INITIAL_QUOTA_FOR_CLIENT)
logical_request.connection.write_control_data(frame_data)
def add_channel_slots(self, slots, send_quota):
"""Adds channel slots.
Args:
slots: number of slots to be added.
send_quota: initial send quota for slots.
"""
self._channel_slots.extend([send_quota] * slots)
# Send NewChannelSlot to client.
frame_data = _create_new_channel_slot(slots, send_quota)
self.send_control_data(frame_data)
def wait_until_done(self, timeout=None):
"""Waits until all workers are done. Returns False when timeout has
occurred. Returns True on success.
Args:
timeout: timeout in sec.
"""
self._logical_channels_condition.acquire()
try:
while len(self._logical_channels) > 0:
self._logger.debug('Waiting workers(%d)...' %
len(self._logical_channels))
self._worker_done_notify_received = False
self._logical_channels_condition.wait(timeout)
if not self._worker_done_notify_received:
self._logger.debug('Waiting worker(s) timed out')
return False
finally:
self._logical_channels_condition.release()
# Flush pending outgoing data
self._writer.stop()
self._writer.join()
return True
def notify_write_data_done(self, channel_id):
"""Called by the writer thread when a write operation has done.
Args:
channel_id: target channel id.
"""
try:
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
channel_data.request.connection.on_write_data_done()
else:
self._logger.debug('Seems that logical channel for %d has gone'
% channel_id)
finally:
self._logical_channels_condition.release()
def send_control_data(self, data):
"""Sends data via the control channel.
Args:
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=_CONTROL_CHANNEL_ID, data=data))
def send_data(self, channel_id, data):
"""Sends data via given logical channel. This method is called by
worker threads.
Args:
channel_id: target channel id.
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=channel_id, data=data))
def _send_drop_channel(self, channel_id, code=None, message=''):
frame_data = _create_drop_channel(channel_id, code, message)
self._logger.debug(
'Sending drop channel for channel id %d' % channel_id)
self.send_control_data(frame_data)
def _send_error_add_channel_response(self, channel_id, status=None):
if status is None:
status = common.HTTP_STATUS_BAD_REQUEST
if status in _HTTP_BAD_RESPONSE_MESSAGES:
message = _HTTP_BAD_RESPONSE_MESSAGES[status]
else:
self._logger.debug('Response message for %d is not found' % status)
message = '???'
response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message)
frame_data = _create_add_channel_response(channel_id,
encoded_handshake=response,
encoding=0, rejected=True)
self.send_control_data(frame_data)
def _create_logical_request(self, block):
if block.channel_id == _CONTROL_CHANNEL_ID:
# TODO(bashi): Raise PhysicalConnectionError with code 2006
# instead of MuxUnexpectedException.
raise MuxUnexpectedException(
'Received the control channel id (0) as objective channel '
'id for AddChannel')
if block.encoding > _HANDSHAKE_ENCODING_DELTA:
raise PhysicalConnectionError(
_DROP_CODE_UNKNOWN_REQUEST_ENCODING)
method, path, version, headers = _parse_request_text(
block.encoded_handshake)
if block.encoding == _HANDSHAKE_ENCODING_DELTA:
headers = self._handshake_base.create_headers(headers)
connection = _LogicalConnection(self, block.channel_id)
request = _LogicalRequest(block.channel_id, method, path, version,
headers, connection)
return request
def _do_handshake_for_logical_request(self, request, send_quota=0):
try:
receive_quota = self._channel_slots.popleft()
except IndexError:
raise LogicalChannelError(
request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION)
handshaker = _MuxHandshaker(request, self.dispatcher,
send_quota, receive_quota)
try:
handshaker.do_handshake()
except handshake.VersionException, e:
self._logger.info('%s', e)
self._send_error_add_channel_response(
request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
return False
except handshake.HandshakeException, e:
# TODO(bashi): Should we _Fail the Logical Channel_ with 3001
# instead?
self._logger.info('%s', e)
self._send_error_add_channel_response(request.channel_id,
status=e.status)
return False
except handshake.AbortedByUserException, e:
self._logger.info('%s', e)
self._send_error_add_channel_response(request.channel_id)
return False
return True
def _add_logical_channel(self, logical_request):
try:
self._logical_channels_condition.acquire()
if logical_request.channel_id in self._logical_channels:
self._logger.debug('Channel id %d already exists' %
logical_request.channel_id)
raise PhysicalConnectionError(
_DROP_CODE_CHANNEL_ALREADY_EXISTS,
'Channel id %d already exists' %
logical_request.channel_id)
worker = _Worker(self, logical_request)
channel_data = _LogicalChannelData(logical_request, worker)
self._logical_channels[logical_request.channel_id] = channel_data
worker.start()
finally:
self._logical_channels_condition.release()
def _process_add_channel_request(self, block):
try:
logical_request = self._create_logical_request(block)
except ValueError, e:
self._logger.debug('Failed to create logical request: %r' % e)
self._send_error_add_channel_response(
block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
return
if self._do_handshake_for_logical_request(logical_request):
if block.encoding == _HANDSHAKE_ENCODING_IDENTITY:
# Update handshake base.
# TODO(bashi): Make sure this is the right place to update
# handshake base.
self._handshake_base = _HandshakeDeltaBase(
logical_request.headers_in)
self._add_logical_channel(logical_request)
else:
self._send_error_add_channel_response(
block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
def _process_flow_control(self, block):
try:
self._logical_channels_condition.acquire()
if not block.channel_id in self._logical_channels:
return
channel_data = self._logical_channels[block.channel_id]
channel_data.request.ws_stream.replenish_send_quota(
block.send_quota)
finally:
self._logical_channels_condition.release()
def _process_drop_channel(self, block):
self._logger.debug(
'DropChannel received for %d: code=%r, reason=%r' %
(block.channel_id, block.drop_code, block.drop_message))
try:
self._logical_channels_condition.acquire()
if not block.channel_id in self._logical_channels:
return
channel_data = self._logical_channels[block.channel_id]
channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED
# Close the logical channel
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
channel_data.request.ws_stream.stop_sending()
finally:
self._logical_channels_condition.release()
def _process_control_blocks(self, parser):
for control_block in parser.read_control_blocks():
opcode = control_block.opcode
self._logger.debug('control block received, opcode: %d' % opcode)
if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
self._process_add_channel_request(control_block)
elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received AddChannelResponse')
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
self._process_flow_control(control_block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
self._process_drop_channel(control_block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received NewChannelSlot')
else:
raise MuxUnexpectedException(
'Unexpected opcode %r' % opcode)
def _process_logical_frame(self, channel_id, parser):
self._logger.debug('Received a frame. channel id=%d' % channel_id)
try:
self._logical_channels_condition.acquire()
if not channel_id in self._logical_channels:
# We must ignore the message for an inactive channel.
return
channel_data = self._logical_channels[channel_id]
fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
consuming_byte = len(payload)
if opcode != common.OPCODE_CONTINUATION:
consuming_byte += 1
if not channel_data.request.ws_stream.consume_receive_quota(
consuming_byte):
# The client violates quota. Close logical channel.
raise LogicalChannelError(
channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3,
mask=False)
frame_data = header + payload
channel_data.request.connection.append_frame_data(frame_data)
finally:
self._logical_channels_condition.release()
def dispatch_message(self, message):
"""Dispatches message. The reader thread calls this method.
Args:
message: a message that contains encapsulated frame.
Raises:
PhysicalConnectionError: if the message contains physical
connection level errors.
LogicalChannelError: if the message contains logical channel
level errors.
"""
parser = _MuxFramePayloadParser(message)
try:
channel_id = parser.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED)
if channel_id == _CONTROL_CHANNEL_ID:
self._process_control_blocks(parser)
else:
self._process_logical_frame(channel_id, parser)
def notify_worker_done(self, channel_id):
"""Called when a worker has finished.
Args:
channel_id: channel id corresponded with the worker.
"""
self._logger.debug('Worker for channel id %d terminated' % channel_id)
try:
self._logical_channels_condition.acquire()
if not channel_id in self._logical_channels:
raise MuxUnexpectedException(
'Channel id %d not found' % channel_id)
channel_data = self._logical_channels.pop(channel_id)
finally:
self._worker_done_notify_received = True
self._logical_channels_condition.notify()
self._logical_channels_condition.release()
if not channel_data.request.server_terminated:
self._send_drop_channel(
channel_id, code=channel_data.drop_code,
message=channel_data.drop_message)
def notify_reader_done(self):
"""This method is called by the reader thread when the reader has
finished.
"""
self._logger.debug(
'Terminating all logical connections waiting for incoming data '
'...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def notify_writer_done(self):
"""This method is called by the writer thread when the writer has
finished.
"""
self._logger.debug(
'Terminating all logical connections waiting for write '
'completion ...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.on_writer_done()
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def fail_physical_connection(self, code, message):
"""Fail the physical connection.
Args:
code: drop reason code.
message: drop message.
"""
self._logger.debug('Failing the physical connection...')
self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
self._writer.stop(common.STATUS_INTERNAL_ENDPOINT_ERROR)
def fail_logical_channel(self, channel_id, code, message):
"""Fail a logical channel.
Args:
channel_id: channel id.
code: drop reason code.
message: drop message.
"""
self._logger.debug('Failing logical channel %d...' % channel_id)
try:
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
# Close the logical channel. notify_worker_done() will be
# called later and it will send DropChannel.
channel_data.drop_code = code
channel_data.drop_message = message
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
channel_data.request.ws_stream.stop_sending()
else:
self._send_drop_channel(channel_id, code, message)
finally:
self._logical_channels_condition.release()
def use_mux(request):
return hasattr(request, 'mux_processor') and (
request.mux_processor.is_active())
def start(request, dispatcher):
mux_handler = _MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
_INITIAL_QUOTA_FOR_CLIENT)
mux_handler.wait_until_done()
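# Note (added annotation): use_mux() and start() above are the module's entry
# points. A handshake handler is expected to check use_mux(request) after the
# opening handshake and, when it returns True, hand the physical connection to
# start(request, dispatcher), which blocks in wait_until_done() until every
# logical channel has finished.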
# vi:sts=4 sw=4 et
|
bsd-3-clause
|
pavelchristof/gomoku-ai
|
tensorflow/contrib/slim/python/slim/nets/overfeat.py
|
164
|
5562
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Usage:
with slim.arg_scope(overfeat.overfeat_arg_scope()):
outputs, end_points = overfeat.overfeat(inputs)
@@overfeat
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def overfeat_arg_scope(weight_decay=0.0005):
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
activation_fn=nn_ops.relu,
weights_regularizer=regularizers.l2_regularizer(weight_decay),
biases_initializer=init_ops.zeros_initializer()):
with arg_scope([layers.conv2d], padding='SAME'):
with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def overfeat(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='overfeat'):
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 231x231. To use in fully
convolutional mode, set spatial_squeeze to false.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not the spatial dimensions of the outputs should
be squeezed. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
Returns:
the last op containing the log predictions and end_points dict.
"""
with variable_scope.variable_scope(scope, 'overfeat', [inputs]) as sc:
end_points_collection = sc.name + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d
with arg_scope(
[layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
outputs_collections=end_points_collection):
net = layers.conv2d(
inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
net = layers.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
net = layers.conv2d(net, 512, [3, 3], scope='conv3')
net = layers.conv2d(net, 1024, [3, 3], scope='conv4')
net = layers.conv2d(net, 1024, [3, 3], scope='conv5')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
with arg_scope(
[layers.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=init_ops.constant_initializer(0.1)):
# Use conv2d instead of fully_connected layers.
net = layers.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout6')
net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout7')
net = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=init_ops.zeros_initializer(),
scope='fc8')
# Convert end_points_collection into a end_point dict.
end_points = utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
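# A minimal usage sketch (illustrative annotation, not part of the original
# file; assumes a TF 1.x graph and inputs already resized to 231x231 as the
# docstring suggests):
#
#   import tensorflow as tf
#   images = tf.placeholder(tf.float32, [None, 231, 231, 3])
#   with arg_scope(overfeat_arg_scope()):
#       logits, end_points = overfeat(images, num_classes=1000, is_training=False)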
|
apache-2.0
|
o3project/odenos
|
src/test/python/org/o3project/odenos/__init__.py
|
233
|
1026
|
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
|
apache-2.0
|
aferr/TimingCompartments
|
src/arch/x86/bios/E820.py
|
19
|
2615
|
# Copyright (c) 2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.SimObject import SimObject
class X86E820Entry(SimObject):
type = 'X86E820Entry'
cxx_class = 'X86ISA::E820Entry'
addr = Param.Addr(0, 'address of the beginning of the region')
size = Param.MemorySize('0B', 'size of the region')
range_type = Param.UInt64('type of the region')
class X86E820Table(SimObject):
type = 'X86E820Table'
cxx_class = 'X86ISA::E820Table'
entries = VectorParam.X86E820Entry('entries for the e820 table')
|
bsd-3-clause
|
scipy/scipy
|
scipy/integrate/quadpack.py
|
12
|
37343
|
# Author: Travis Oliphant 2001
# Author: Nathan Woods 2013 (nquad &c)
import sys
import warnings
from functools import partial
from . import _quadpack
import numpy
from numpy import Inf
__all__ = ['quad', 'dblquad', 'tplquad', 'nquad', 'quad_explain',
'IntegrationWarning']
error = _quadpack.error
class IntegrationWarning(UserWarning):
"""
Warning on issues during integration.
"""
pass
def quad_explain(output=sys.stdout):
"""
Print extra information about integrate.quad() parameters and returns.
Parameters
----------
output : instance with "write" method, optional
Information about `quad` is passed to ``output.write()``.
Default is ``sys.stdout``.
Returns
-------
None
Examples
--------
We can show detailed information of the `integrate.quad` function in stdout:
>>> from scipy.integrate import quad_explain
>>> quad_explain()
"""
output.write(quad.__doc__)
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
limlst=50):
"""
Compute a definite integral.
Integrate func from `a` to `b` (possibly infinite interval) using a
technique from the Fortran library QUADPACK.
Parameters
----------
func : {function, scipy.LowLevelCallable}
A Python function or method to integrate. If `func` takes many
arguments, it is integrated along the axis corresponding to the
first argument.
If the user desires improved integration performance, then `f` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(double x)
double func(double x, void *user_data)
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
In the call forms with ``xx``, ``n`` is the length of the ``xx``
array which contains ``xx[0] == x`` and the rest of the items are
numbers contained in the ``args`` argument of quad.
In addition, certain ctypes call signatures are supported for
backward compatibility, but those should not be used in new code.
a : float
Lower limit of integration (use -numpy.inf for -infinity).
b : float
Upper limit of integration (use numpy.inf for +infinity).
args : tuple, optional
Extra arguments to pass to `func`.
full_output : int, optional
Non-zero to return a dictionary of integration information.
If non-zero, warning messages are also suppressed and the
message is appended to the output tuple.
Returns
-------
y : float
The integral of func from `a` to `b`.
abserr : float
An estimate of the absolute error in the result.
infodict : dict
A dictionary containing additional information.
Run scipy.integrate.quad_explain() for more information.
message
A convergence message.
explain
Appended only with 'cos' or 'sin' weighting and infinite
integration limits, it contains an explanation of the codes in
infodict['ierlst']
Other Parameters
----------------
epsabs : float or int, optional
Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the
numerical approximation. See `epsrel` below.
epsrel : float or int, optional
Relative error tolerance. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
limit : float or int, optional
An upper bound on the number of subintervals used in the adaptive
algorithm.
points : (sequence of floats,ints), optional
A sequence of break points in the bounded integration interval
where local difficulties of the integrand may occur (e.g.,
singularities, discontinuities). The sequence does not have
to be sorted. Note that this option cannot be used in conjunction
with ``weight``.
weight : float or int, optional
String indicating weighting function. Full explanation for this
and the remaining arguments can be found below.
wvar : optional
Variables for use with weighting functions.
wopts : optional
Optional input for reusing Chebyshev moments.
maxp1 : float or int, optional
An upper bound on the number of Chebyshev moments.
limlst : int, optional
Upper bound on the number of cycles (>=3) for use with a sinusoidal
weighting and an infinite end-point.
See Also
--------
dblquad : double integral
tplquad : triple integral
nquad : n-dimensional integrals (uses `quad` recursively)
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simpson : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
**Extra information for quad() inputs and outputs**
If full_output is non-zero, then the third output argument
(infodict) is a dictionary with entries as tabulated below. For
infinite limits, the range is transformed to (0,1) and the
optional outputs are given with respect to this transformed range.
Let M be the input argument limit and let K be infodict['last'].
The entries are:
'neval'
The number of function evaluations.
'last'
The number, K, of subintervals produced in the subdivision process.
'alist'
A rank-1 array of length M, the first K elements of which are the
left end points of the subintervals in the partition of the
integration range.
'blist'
A rank-1 array of length M, the first K elements of which are the
right end points of the subintervals.
'rlist'
A rank-1 array of length M, the first K elements of which are the
integral approximations on the subintervals.
'elist'
A rank-1 array of length M, the first K elements of which are the
moduli of the absolute error estimates on the subintervals.
'iord'
A rank-1 integer array of length M, the first L elements of
which are pointers to the error estimates over the subintervals
with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
sequence ``infodict['iord']`` and let E be the sequence
``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
decreasing sequence.
If the input argument points is provided (i.e., it is not None),
the following additional outputs are placed in the output
dictionary. Assume the points sequence is of length P.
'pts'
A rank-1 array of length P+2 containing the integration limits
and the break points of the intervals in ascending order.
This is an array giving the subintervals over which integration
will occur.
'level'
A rank-1 integer array of length M (=limit), containing the
subdivision levels of the subintervals, i.e., if (aa,bb) is a
subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
'ndin'
A rank-1 integer array of length P+2. After the first integration
over the intervals (pts[1], pts[2]), the error estimates over some
of the intervals may have been increased artificially in order to
put their subdivision forward. This array has ones in slots
corresponding to the subintervals for which this happens.
**Weighting the integrand**
The input variables, *weight* and *wvar*, are used to weight the
integrand by a select list of functions. Different integration
methods are used to compute the integral with these weighting
functions, and these do not support specifying break points. The
possible values of weight and the corresponding weighting functions are.
========== =================================== =====================
``weight`` Weight function used ``wvar``
========== =================================== =====================
'cos' cos(w*x) wvar = w
'sin' sin(w*x) wvar = w
'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
'cauchy' 1/(x-c) wvar = c
========== =================================== =====================
wvar holds the parameter w, (alpha, beta), or c depending on the weight
selected. In these expressions, a and b are the integration limits.
For the 'cos' and 'sin' weighting, additional inputs and outputs are
available.
For finite integration limits, the integration is performed using a
Clenshaw-Curtis method which uses Chebyshev moments. For repeated
calculations, these moments are saved in the output dictionary:
'momcom'
The maximum level of Chebyshev moments that have been computed,
i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
computed for intervals of length ``|b-a| * 2**(-l)``,
``l=0,1,...,M_c``.
'nnlog'
A rank-1 integer array of length M(=limit), containing the
subdivision levels of the subintervals, i.e., an element of this
array is equal to l if the corresponding subinterval is
``|b-a|* 2**(-l)``.
'chebmo'
A rank-2 array of shape (25, maxp1) containing the computed
Chebyshev moments. These can be passed on to an integration
over the same interval by passing this array as the second
element of the sequence wopts and passing infodict['momcom'] as
the first element.
If one of the integration limits is infinite, then a Fourier integral is
computed (assuming w neq 0). If full_output is 1 and a numerical error
is encountered, besides the error message attached to the output tuple,
a dictionary is also appended to the output tuple which translates the
error codes in the array ``info['ierlst']`` to English messages. The
output information dictionary contains the following entries instead of
'last', 'alist', 'blist', 'rlist', and 'elist':
'lst'
The number of subintervals needed for the integration (call it ``K_f``).
'rslst'
A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
contain the integral contribution over the interval
``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
and ``k=1,2,...,K_f``.
'erlst'
A rank-1 array of length ``M_f`` containing the error estimate
corresponding to the interval in the same position in
``infodict['rslst']``.
'ierlst'
A rank-1 integer array of length ``M_f`` containing an error flag
corresponding to the interval in the same position in
``infodict['rslst']``. See the explanation dictionary (last entry
in the output tuple) for the meaning of the codes.
Examples
--------
Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
>>> from scipy import integrate
>>> x2 = lambda x: x**2
>>> integrate.quad(x2, 0, 4)
(21.333333333333332, 2.3684757858670003e-13)
>>> print(4**3 / 3.) # analytical result
21.3333333333
Calculate :math:`\\int^\\infty_0 e^{-x} dx`
>>> import numpy as np
>>> invexp = lambda x: np.exp(-x)
>>> integrate.quad(invexp, 0, np.inf)
(1.0, 5.842605999138044e-11)
>>> f = lambda x,a : a*x
>>> y, err = integrate.quad(f, 0, 1, args=(1,))
>>> y
0.5
>>> y, err = integrate.quad(f, 0, 1, args=(3,))
>>> y
1.5
Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
y parameter as 1::
testlib.c =>
double func(int n, double args[n]){
return args[0]*args[0] + args[1]*args[1];}
compile to library testlib.*
::
from scipy import integrate
import ctypes
lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
lib.func.restype = ctypes.c_double
lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
integrate.quad(lib.func,0,1,(1))
#(1.3333333333333333, 1.4802973661668752e-14)
print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
# 1.3333333333333333
Be aware that pulse shapes and other sharp features as compared to the
size of the integration interval may not be integrated correctly using
this method. A simplified example of this limitation is integrating a
y-axis reflected step function with many zero values within the integral's
bounds.
>>> y = lambda x: 1 if x<=0 else 0
>>> integrate.quad(y, -1, 1)
(1.0, 1.1102230246251565e-14)
>>> integrate.quad(y, -1, 100)
(1.0000000002199108, 1.0189464580163188e-08)
>>> integrate.quad(y, -1, 10000)
(0.0, 0.0)
"""
if not isinstance(args, tuple):
args = (args,)
# check the limits of integration: \int_a^b, expect a < b
flip, a, b = b < a, min(a, b), max(a, b)
if weight is None:
retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
points)
else:
if points is not None:
msg = ("Break points cannot be specified when using weighted integrand.\n"
"Continuing, ignoring specified points.")
warnings.warn(msg, IntegrationWarning, stacklevel=2)
retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
limlst, limit, maxp1, weight, wvar, wopts)
if flip:
retval = (-retval[0],) + retval[1:]
ier = retval[-1]
if ier == 0:
return retval[:-1]
msgs = {80: "A Python error occurred possibly while calling the function.",
1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
5: "The integral is probably divergent, or slowly convergent.",
6: "The input is invalid.",
7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
'unknown': "Unknown error."}
if weight in ['cos','sin'] and (b == Inf or a == -Inf):
msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1."
explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
5: "The integral over this cycle is probably divergent or slowly convergent."}
try:
msg = msgs[ier]
except KeyError:
msg = msgs['unknown']
if ier in [1,2,3,4,5,7]:
if full_output:
if weight in ['cos', 'sin'] and (b == Inf or a == -Inf):
return retval[:-1] + (msg, explain)
else:
return retval[:-1] + (msg,)
else:
warnings.warn(msg, IntegrationWarning, stacklevel=2)
return retval[:-1]
elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6
if epsabs <= 0: # Small error tolerance - applies to all methods
if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
" 5e-29 and 50*(machine epsilon).")
elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == Inf):
msg = ("Sine or cosine weighted intergals with infinite domain"
" must have 'epsabs'>0.")
elif weight is None:
if points is None: # QAGSE/QAGIE
msg = ("Invalid 'limit' argument. There must be"
" at least one subinterval")
else: # QAGPE
if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
msg = ("All break points in 'points' must lie within the"
" integration limits.")
elif len(points) >= limit:
msg = ("Number of break points ({:d})"
" must be less than subinterval"
" limit ({:d})").format(len(points), limit)
else:
if maxp1 < 1:
msg = "Chebyshev moment limit maxp1 must be >=1."
elif weight in ('cos', 'sin') and abs(a+b) == Inf: # QAWFE
msg = "Cycle limit limlst must be >=3."
elif weight.startswith('alg'): # QAWSE
if min(wvar) < -1:
msg = "wvar parameters (alpha, beta) must both be >= -1."
if b < a:
msg = "Integration limits a, b must satistfy a<b."
elif weight == 'cauchy' and wvar in (a, b):
msg = ("Parameter 'wvar' must not equal"
" integration limits 'a' or 'b'.")
raise ValueError(msg)
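# Illustrative sketch (annotation, not part of SciPy): combining the 'sin'
# weight with an infinite upper limit selects the Fourier-integral path
# documented above, e.g.
#
#   from scipy import integrate
#   import numpy as np
#   # integral of exp(-x)*sin(3*x) over [0, inf) equals 3/(1 + 3**2) = 0.3
#   val, err = integrate.quad(lambda x: np.exp(-x), 0, np.inf,
#                             weight='sin', wvar=3)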
def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
infbounds = 0
if (b != Inf and a != -Inf):
pass # standard integration
elif (b == Inf and a != -Inf):
infbounds = 1
bound = a
elif (b == Inf and a == -Inf):
infbounds = 2
bound = 0 # ignored
elif (b != Inf and a == -Inf):
infbounds = -1
bound = b
else:
raise RuntimeError("Infinity comparisons don't work for you.")
if points is None:
if infbounds == 0:
return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
else:
return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
else:
if infbounds != 0:
raise ValueError("Infinity inputs cannot be used with break points.")
else:
#Duplicates force function evaluation at singular points
the_points = numpy.unique(points)
the_points = the_points[a < the_points]
the_points = the_points[the_points < b]
the_points = numpy.concatenate((the_points, (0., 0.)))
return _quadpack._qagpe(func,a,b,the_points,args,full_output,epsabs,epsrel,limit)
def _quad_weight(func,a,b,args,full_output,epsabs,epsrel,limlst,limit,maxp1,weight,wvar,wopts):
if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
raise ValueError("%s not a recognized weighting function." % weight)
strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}
if weight in ['cos','sin']:
integr = strdict[weight]
if (b != Inf and a != -Inf): # finite limits
if wopts is None: # no precomputed Chebyshev moments
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1,1)
else: # precomputed Chebyshev moments
momcom = wopts[0]
chebcom = wopts[1]
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1, 2, momcom, chebcom)
elif (b == Inf and a != -Inf):
return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
epsabs,limlst,limit,maxp1)
elif (b != Inf and a == -Inf): # remap function and interval
if weight == 'cos':
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return func(*myargs)
else:
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return -func(*myargs)
args = (func,) + args
return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
full_output, epsabs, limlst, limit, maxp1)
else:
raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
else:
if a in [-Inf,Inf] or b in [-Inf,Inf]:
raise ValueError("Cannot integrate with this weight over an infinite interval.")
if weight.startswith('alg'):
integr = strdict[weight]
return _quadpack._qawse(func, a, b, wvar, integr, args,
full_output, epsabs, epsrel, limit)
else: # weight == 'cauchy'
return _quadpack._qawce(func, a, b, wvar, args, full_output,
epsabs, epsrel, limit)
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
"""
Compute a double integral.
Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
and ``y = gfun(x)..hfun(x)``.
Parameters
----------
func : callable
A Python function or method of at least two variables: y must be the
first argument and x the second argument.
a, b : float
The limits of integration in x: `a` < `b`
gfun : callable or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : callable or float
The upper boundary curve in y (same requirements as `gfun`).
args : sequence, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the inner 1-D quadrature
integration. Default is 1.49e-8. ``dblquad`` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = inner integral of ``func(y, x)`` from ``gfun(x)``
to ``hfun(x)``, and ``result`` is the numerical approximation.
See `epsrel` below.
epsrel : float, optional
Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See also
--------
quad : single integral
tplquad : triple integral
nquad : N-dimensional integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simpson : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Examples
--------
Compute the double integral of ``x * y**2`` over the box
``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.
>>> from scipy import integrate
>>> f = lambda y, x: x*y**2
>>> integrate.dblquad(f, 0, 2, lambda x: 0, lambda x: 1)
(0.6666666666666667, 7.401486830834377e-15)
"""
def temp_ranges(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
return nquad(func, [temp_ranges, [a, b]], args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
epsrel=1.49e-8):
"""
Compute a triple (definite) integral.
Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.
Parameters
----------
func : function
A Python function or method of at least three variables in the
order (z, y, x).
a, b : float
The limits of integration in x: `a` < `b`
gfun : function or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : function or float
The upper boundary curve in y (same requirements as `gfun`).
qfun : function or float
The lower boundary surface in z. It must be a function that takes
two floats in the order (x, y) and returns a float or a float
indicating a constant boundary surface.
rfun : function or float
The upper boundary surface in z. (Same requirements as `qfun`.)
args : tuple, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the innermost 1-D quadrature
integration. Default is 1.49e-8.
epsrel : float, optional
Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See Also
--------
quad: Adaptive quadrature using QUADPACK
quadrature: Adaptive Gaussian quadrature
fixed_quad: Fixed-order Gaussian quadrature
dblquad: Double integrals
nquad : N-dimensional integrals
romb: Integrators for sampled data
simpson: Integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
scipy.special: For coefficients and roots of orthogonal polynomials
Examples
--------
Compute the triple integral of ``x * y * z``, over ``x`` ranging
from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1.
>>> from scipy import integrate
>>> f = lambda z, y, x: x*y*z
>>> integrate.tplquad(f, 1, 2, lambda x: 2, lambda x: 3,
... lambda x, y: 0, lambda x, y: 1)
(1.8750000000000002, 3.324644794257407e-14)
"""
# f(z, y, x)
# qfun/rfun (x, y)
# gfun/hfun(x)
# nquad will hand (y, x, t0, ...) to ranges0
# nquad will hand (x, t0, ...) to ranges1
# Stupid different API...
def ranges0(*args):
return [qfun(args[1], args[0]) if callable(qfun) else qfun,
rfun(args[1], args[0]) if callable(rfun) else rfun]
def ranges1(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
ranges = [ranges0, ranges1, [a, b]]
return nquad(func, ranges, args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
def nquad(func, ranges, args=None, opts=None, full_output=False):
"""
Integration over multiple variables.
Wraps `quad` to enable integration over multiple variables.
Various options allow improved integration of discontinuous functions, as
well as the use of weighted integration, and generally finer control of the
integration process.
Parameters
----------
func : {callable, scipy.LowLevelCallable}
The function to be integrated. Has arguments of ``x0, ... xn``,
``t0, ... tm``, where integration is carried out over ``x0, ... xn``,
which must be floats. Where ``t0, ... tm`` are extra arguments
passed in args.
Function signature should be ``func(x0, x1, ..., xn, t0, t1, ..., tm)``.
Integration is carried out in order. That is, integration over ``x0``
is the innermost integral, and ``xn`` is the outermost.
If the user desires improved integration performance, then `f` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
where ``n`` is the number of variables and args. The ``xx`` array
contains the coordinates and extra arguments. ``user_data`` is the data
contained in the `scipy.LowLevelCallable`.
ranges : iterable object
Each element of ranges may be either a sequence of 2 numbers, or else
a callable that returns such a sequence. ``ranges[0]`` corresponds to
integration over x0, and so on. If an element of ranges is a callable,
then it will be called with all of the integration arguments available,
as well as any parametric arguments. e.g., if
``func = f(x0, x1, x2, t0, t1)``, then ``ranges[0]`` may be defined as
either ``(a, b)`` or else as ``(a, b) = range0(x1, x2, t0, t1)``.
args : iterable object, optional
Additional arguments ``t0, ..., tn``, required by `func`, `ranges`, and
``opts``.
opts : iterable object or dict, optional
Options to be passed to `quad`. May be empty, a dict, or
a sequence of dicts or functions that return a dict. If empty, the
default options from scipy.integrate.quad are used. If a dict, the same
options are used for all levels of integration. If a sequence, then each
element of the sequence corresponds to a particular integration. e.g.,
opts[0] corresponds to integration over x0, and so on. If a callable,
the signature must be the same as for ``ranges``. The available
options together with their default values are:
- epsabs = 1.49e-08
- epsrel = 1.49e-08
- limit = 50
- points = None
- weight = None
- wvar = None
- wopts = None
For more information on these options, see `quad` and `quad_explain`.
full_output : bool, optional
Partial implementation of ``full_output`` from scipy.integrate.quad.
The number of integrand function evaluations ``neval`` can be obtained
by setting ``full_output=True`` when calling nquad.
Returns
-------
result : float
The result of the integration.
abserr : float
The maximum of the estimates of the absolute error in the various
integration results.
out_dict : dict, optional
A dict containing additional information on the integration.
See Also
--------
quad : 1-D numerical integration
dblquad, tplquad : double and triple integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
Examples
--------
>>> from scipy import integrate
>>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
... 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
>>> def opts0(*args, **kwargs):
... return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
>>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
... opts=[opts0,{},{},{}], full_output=True)
(1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962})
>>> scale = .1
>>> def func2(x0, x1, x2, x3, t0, t1):
... return x0*x1*x3**2 + np.sin(x2) + 1 + (1 if x0+t1*x1-t0>0 else 0)
>>> def lim0(x1, x2, x3, t0, t1):
... return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
... scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
>>> def lim1(x2, x3, t0, t1):
... return [scale * (t0*x2 + t1*x3) - 1,
... scale * (t0*x2 + t1*x3) + 1]
>>> def lim2(x3, t0, t1):
... return [scale * (x3 + t0**2*t1**3) - 1,
... scale * (x3 + t0**2*t1**3) + 1]
>>> def lim3(t0, t1):
... return [scale * (t0+t1) - 1, scale * (t0+t1) + 1]
>>> def opts0(x1, x2, x3, t0, t1):
... return {'points' : [t0 - t1*x1]}
>>> def opts1(x2, x3, t0, t1):
... return {}
>>> def opts2(x3, t0, t1):
... return {}
>>> def opts3(t0, t1):
... return {}
>>> integrate.nquad(func2, [lim0, lim1, lim2, lim3], args=(0,0),
... opts=[opts0, opts1, opts2, opts3])
(25.066666666666666, 2.7829590483937256e-13)
"""
depth = len(ranges)
ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
if args is None:
args = ()
if opts is None:
opts = [dict([])] * depth
if isinstance(opts, dict):
opts = [_OptFunc(opts)] * depth
else:
opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts]
return _NQuad(func, ranges, opts, full_output).integrate(*args)
class _RangeFunc:
def __init__(self, range_):
self.range_ = range_
def __call__(self, *args):
"""Return stored value.
*args needed because range_ can be float or func, and is called with
variable number of parameters.
"""
return self.range_
class _OptFunc:
def __init__(self, opt):
self.opt = opt
def __call__(self, *args):
"""Return stored dict."""
return self.opt
class _NQuad:
def __init__(self, func, ranges, opts, full_output):
self.abserr = 0
self.func = func
self.ranges = ranges
self.opts = opts
self.maxdepth = len(ranges)
self.full_output = full_output
if self.full_output:
self.out_dict = {'neval': 0}
def integrate(self, *args, **kwargs):
depth = kwargs.pop('depth', 0)
if kwargs:
raise ValueError('unexpected kwargs')
# Get the integration range and options for this depth.
ind = -(depth + 1)
fn_range = self.ranges[ind]
low, high = fn_range(*args)
fn_opt = self.opts[ind]
opt = dict(fn_opt(*args))
if 'points' in opt:
opt['points'] = [x for x in opt['points'] if low <= x <= high]
if depth + 1 == self.maxdepth:
f = self.func
else:
f = partial(self.integrate, depth=depth+1)
quad_r = quad(f, low, high, args=args, full_output=self.full_output,
**opt)
value = quad_r[0]
abserr = quad_r[1]
if self.full_output:
infodict = quad_r[2]
# The 'neval' parameter in full_output returns the total
# number of times the integrand function was evaluated.
# Therefore, only the innermost integration loop counts.
if depth + 1 == self.maxdepth:
self.out_dict['neval'] += infodict['neval']
self.abserr = max(self.abserr, abserr)
if depth > 0:
return value
else:
# Final result of N-D integration with error
if self.full_output:
return value, self.abserr, self.out_dict
else:
return value, self.abserr
|
bsd-3-clause
|
ebar0n/django
|
django/contrib/admin/widgets.py
|
2
|
17014
|
"""
Form Widget classes specific to the Django admin site.
"""
import copy
import json
from django import forms
from django.conf import settings
from django.db.models.deletion import CASCADE
from django.urls import reverse
from django.urls.exceptions import NoReverseMatch
from django.utils.html import smart_urlquote
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
from django.utils.translation import get_language, gettext as _
class FilteredSelectMultiple(forms.SelectMultiple):
"""
A SelectMultiple with a JavaScript filter interface.
Note that the resulting JavaScript assumes that the jsi18n
catalog has been loaded in the page
"""
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = [
'vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'core.js',
'SelectBox.js',
'SelectFilter2.js',
]
return forms.Media(js=["admin/js/%s" % path for path in js])
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super().__init__(attrs, choices)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['attrs']['class'] = 'selectfilter'
if self.is_stacked:
context['widget']['attrs']['class'] += 'stacked'
context['widget']['attrs']['data-field-name'] = self.verbose_name
context['widget']['attrs']['data-is-stacked'] = int(self.is_stacked)
return context
class AdminDateWidget(forms.DateInput):
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = [
'vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'calendar.js',
'admin/DateTimeShortcuts.js',
]
return forms.Media(js=["admin/js/%s" % path for path in js])
def __init__(self, attrs=None, format=None):
attrs = {'class': 'vDateField', 'size': '10', **(attrs or {})}
super().__init__(attrs=attrs, format=format)
class AdminTimeWidget(forms.TimeInput):
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = [
'vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'calendar.js',
'admin/DateTimeShortcuts.js',
]
return forms.Media(js=["admin/js/%s" % path for path in js])
def __init__(self, attrs=None, format=None):
attrs = {'class': 'vTimeField', 'size': '8', **(attrs or {})}
super().__init__(attrs=attrs, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
template_name = 'admin/widgets/split_datetime.html'
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['date_label'] = _('Date:')
context['time_label'] = _('Time:')
return context
class AdminRadioSelect(forms.RadioSelect):
template_name = 'admin/widgets/radio.html'
class AdminFileWidget(forms.ClearableFileInput):
template_name = 'admin/widgets/clearable_file_input.html'
def url_params_from_lookup_dict(lookups):
"""
Convert the type of lookups specified in a ForeignKey limit_choices_to
attribute to a dictionary of query parameters
"""
params = {}
if lookups and hasattr(lookups, 'items'):
for k, v in lookups.items():
if callable(v):
v = v()
if isinstance(v, (tuple, list)):
v = ','.join(str(x) for x in v)
elif isinstance(v, bool):
v = ('0', '1')[v]
else:
v = str(v)
params[k] = v
return params
class ForeignKeyRawIdWidget(forms.TextInput):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface rather than
in a <select> box.
"""
template_name = 'admin/widgets/foreign_key_raw_id.html'
def __init__(self, rel, admin_site, attrs=None, using=None):
self.rel = rel
self.admin_site = admin_site
self.db = using
super().__init__(attrs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
rel_to = self.rel.model
if rel_to in self.admin_site._registry:
# The related object is registered with the same AdminSite
related_url = reverse(
'admin:%s_%s_changelist' % (
rel_to._meta.app_label,
rel_to._meta.model_name,
),
current_app=self.admin_site.name,
)
params = self.url_parameters()
if params:
related_url += '?' + '&'.join('%s=%s' % (k, v) for k, v in params.items())
context['related_url'] = mark_safe(related_url)
context['link_title'] = _('Lookup')
# The JavaScript code looks for this class.
context['widget']['attrs'].setdefault('class', 'vForeignKeyRawIdAdminField')
if context['widget']['value']:
context['link_label'], context['link_url'] = self.label_and_url_for_value(value)
return context
def base_url_parameters(self):
limit_choices_to = self.rel.limit_choices_to
if callable(limit_choices_to):
limit_choices_to = limit_choices_to()
return url_params_from_lookup_dict(limit_choices_to)
def url_parameters(self):
from django.contrib.admin.views.main import TO_FIELD_VAR
params = self.base_url_parameters()
params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
return params
def label_and_url_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.model._default_manager.using(self.db).get(**{key: value})
except (ValueError, self.rel.model.DoesNotExist):
return '', ''
try:
url = reverse(
'%s:%s_%s_change' % (
self.admin_site.name,
obj._meta.app_label,
obj._meta.object_name.lower(),
),
args=(obj.pk,)
)
except NoReverseMatch:
url = '' # Admin not registered for target model.
return Truncator(obj).words(14, truncate='...'), url
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
in a <select multiple> box.
"""
template_name = 'admin/widgets/many_to_many_raw_id.html'
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.rel.model in self.admin_site._registry:
# The related object is registered with the same AdminSite
context['widget']['attrs']['class'] = 'vManyToManyRawIdAdminField'
return context
def url_parameters(self):
return self.base_url_parameters()
def label_and_url_for_value(self, value):
return '', ''
def value_from_datadict(self, data, files, name):
value = data.get(name)
if value:
return value.split(',')
def format_value(self, value):
return ','.join(str(v) for v in value) if value else ''
class RelatedFieldWidgetWrapper(forms.Widget):
"""
This class is a wrapper to a given widget to add the add icon for the
admin interface.
"""
template_name = 'admin/widgets/related_widget_wrapper.html'
def __init__(self, widget, rel, admin_site, can_add_related=None,
can_change_related=False, can_delete_related=False):
self.needs_multipart_form = widget.needs_multipart_form
self.attrs = widget.attrs
self.choices = widget.choices
self.widget = widget
self.rel = rel
# Backwards compatible check for whether a user can add related
# objects.
if can_add_related is None:
can_add_related = rel.model in admin_site._registry
self.can_add_related = can_add_related
# XXX: The UX does not support multiple selected values.
multiple = getattr(widget, 'allow_multiple_selected', False)
self.can_change_related = not multiple and can_change_related
# XXX: The deletion UX can be confusing when dealing with cascading deletion.
cascade = getattr(rel, 'on_delete', None) is CASCADE
self.can_delete_related = not multiple and not cascade and can_delete_related
# so we can check if the related object is registered with this AdminSite
self.admin_site = admin_site
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.widget = copy.deepcopy(self.widget, memo)
obj.attrs = self.widget.attrs
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.widget.is_hidden
@property
def media(self):
return self.widget.media
def get_related_url(self, info, action, *args):
return reverse("admin:%s_%s_%s" % (info + (action,)),
current_app=self.admin_site.name, args=args)
def get_context(self, name, value, attrs):
from django.contrib.admin.views.main import IS_POPUP_VAR, TO_FIELD_VAR
rel_opts = self.rel.model._meta
info = (rel_opts.app_label, rel_opts.model_name)
self.widget.choices = self.choices
url_params = '&'.join("%s=%s" % param for param in [
(TO_FIELD_VAR, self.rel.get_related_field().name),
(IS_POPUP_VAR, 1),
])
context = {
'rendered_widget': self.widget.render(name, value, attrs),
'name': name,
'url_params': url_params,
'model': rel_opts.verbose_name,
}
if self.can_change_related:
change_related_template_url = self.get_related_url(info, 'change', '__fk__')
context.update(
can_change_related=True,
change_related_template_url=change_related_template_url,
)
if self.can_add_related:
add_related_url = self.get_related_url(info, 'add')
context.update(
can_add_related=True,
add_related_url=add_related_url,
)
if self.can_delete_related:
delete_related_template_url = self.get_related_url(info, 'delete', '__fk__')
context.update(
can_delete_related=True,
delete_related_template_url=delete_related_template_url,
)
return context
def value_from_datadict(self, data, files, name):
return self.widget.value_from_datadict(data, files, name)
def value_omitted_from_data(self, data, files, name):
return self.widget.value_omitted_from_data(data, files, name)
def id_for_label(self, id_):
return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
def __init__(self, attrs=None):
super().__init__(attrs={'class': 'vLargeTextField', **(attrs or {})})
class AdminTextInputWidget(forms.TextInput):
def __init__(self, attrs=None):
super().__init__(attrs={'class': 'vTextField', **(attrs or {})})
class AdminEmailInputWidget(forms.EmailInput):
def __init__(self, attrs=None):
super().__init__(attrs={'class': 'vTextField', **(attrs or {})})
class AdminURLFieldWidget(forms.URLInput):
template_name = 'admin/widgets/url.html'
def __init__(self, attrs=None):
super().__init__(attrs={'class': 'vURLField', **(attrs or {})})
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['current_label'] = _('Currently:')
context['change_label'] = _('Change:')
context['widget']['href'] = smart_urlquote(context['widget']['value']) if value else ''
return context
class AdminIntegerFieldWidget(forms.NumberInput):
class_name = 'vIntegerField'
def __init__(self, attrs=None):
super().__init__(attrs={'class': self.class_name, **(attrs or {})})
class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget):
class_name = 'vBigIntegerField'
# Mapping of lower case language codes [returned by Django's get_language()]
# to language codes supported by select2.
# See django/contrib/admin/static/admin/js/vendor/select2/i18n/*
SELECT2_TRANSLATIONS = {x.lower(): x for x in [
'ar', 'az', 'bg', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'et',
'eu', 'fa', 'fi', 'fr', 'gl', 'he', 'hi', 'hr', 'hu', 'id', 'is',
'it', 'ja', 'km', 'ko', 'lt', 'lv', 'mk', 'ms', 'nb', 'nl', 'pl',
'pt-BR', 'pt', 'ro', 'ru', 'sk', 'sr-Cyrl', 'sr', 'sv', 'th',
'tr', 'uk', 'vi', 'zh-CN', 'zh-TW',
]}
class AutocompleteMixin:
"""
Select widget mixin that loads options from AutocompleteJsonView via AJAX.
Renders the necessary data attributes for select2 and adds the static form
media.
"""
url_name = '%s:%s_%s_autocomplete'
def __init__(self, rel, admin_site, attrs=None, choices=(), using=None):
self.rel = rel
self.admin_site = admin_site
self.db = using
self.choices = choices
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def get_url(self):
model = self.rel.model
return reverse(self.url_name % (self.admin_site.name, model._meta.app_label, model._meta.model_name))
def build_attrs(self, base_attrs, extra_attrs=None):
"""
Set select2's AJAX attributes.
Attributes can be set using the html5 data attribute.
Nested attributes require a double dash as per
https://select2.org/configuration/data-attributes#nested-subkey-options
"""
attrs = super().build_attrs(base_attrs, extra_attrs=extra_attrs)
attrs.setdefault('class', '')
attrs.update({
'data-ajax--cache': 'true',
'data-ajax--type': 'GET',
'data-ajax--url': self.get_url(),
'data-theme': 'admin-autocomplete',
'data-allow-clear': json.dumps(not self.is_required),
'data-placeholder': '', # Allows clearing of the input.
'class': attrs['class'] + (' ' if attrs['class'] else '') + 'admin-autocomplete',
})
return attrs
def optgroups(self, name, value, attr=None):
"""Return selected options based on the ModelChoiceIterator."""
default = (None, [], 0)
groups = [default]
has_selected = False
selected_choices = {
str(v) for v in value
if str(v) not in self.choices.field.empty_values
}
if not self.is_required and not self.allow_multiple_selected:
default[1].append(self.create_option(name, '', '', False, 0))
choices = (
(obj.pk, self.choices.field.label_from_instance(obj))
for obj in self.choices.queryset.using(self.db).filter(pk__in=selected_choices)
)
for option_value, option_label in choices:
selected = (
str(option_value) in value and
(has_selected is False or self.allow_multiple_selected)
)
if selected is True and has_selected is False:
has_selected = True
index = len(default[1])
subgroup = default[1]
subgroup.append(self.create_option(name, option_value, option_label, selected_choices, index))
return groups
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
i18n_name = SELECT2_TRANSLATIONS.get(get_language())
i18n_file = ('admin/js/vendor/select2/i18n/%s.js' % i18n_name,) if i18n_name else ()
return forms.Media(
js=(
'admin/js/vendor/jquery/jquery%s.js' % extra,
'admin/js/vendor/select2/select2.full%s.js' % extra,
) + i18n_file + (
'admin/js/jquery.init.js',
'admin/js/autocomplete.js',
),
css={
'screen': (
'admin/css/vendor/select2/select2%s.css' % extra,
'admin/css/autocomplete.css',
),
},
)
class AutocompleteSelect(AutocompleteMixin, forms.Select):
pass
class AutocompleteSelectMultiple(AutocompleteMixin, forms.SelectMultiple):
pass
|
bsd-3-clause
|
marcuspridham/crosswalk
|
build/android/lzma_compress.py
|
11
|
1753
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=F0401
import optparse
import os
import shutil
import sys
import subprocess
GYP_ANDROID_DIR = os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir,
'build',
'android',
'gyp')
sys.path.append(GYP_ANDROID_DIR)
from util import build_utils
def DoCompress(dest_path, sources):
build_utils.DeleteDirectory(dest_path)
build_utils.MakeDirectory(dest_path)
for source in sources:
shutil.copy(source, dest_path)
file_to_compress = os.path.join(dest_path, os.path.basename(source))
subprocess.check_call(['lzma', '-f', file_to_compress])
def DoShowOutputNames(dest_path, sources):
for source in sources:
print('%s.lzma' % os.path.join(dest_path, os.path.basename(source)))
def main():
parser = optparse.OptionParser()
parser.add_option('--dest-path',
help='Destination directory for compressed files')
parser.add_option('--mode', choices=('compress', 'show-output-names'),
help='Whether to compress the files or show their '
'compressed names')
parser.add_option('--sources', help='The list of files to be compressed')
options, _ = parser.parse_args(sys.argv)
sources = build_utils.ParseGypList(options.sources)
if options.mode == 'compress':
return DoCompress(options.dest_path, sources)
else:
return DoShowOutputNames(options.dest_path, sources)
if __name__ == '__main__':
sys.exit(main())
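# --- Editorial sketch (illustration only; the build actually shells out to the `lzma` binary) ---
# DoCompress() copies every source into dest_path and runs `lzma -f` on the copy.
# Assuming Python 3's built-in lzma module is acceptable, a rough stdlib-only
# equivalent of that single compression step could look like the helper below
# (the helper name is made up here and is not used by the build).
def _compress_file_with_stdlib(path):
    import lzma
    with open(path, 'rb') as src, lzma.open(path + '.lzma', 'wb', format=lzma.FORMAT_ALONE) as dst:
        shutil.copyfileobj(src, dst)
    os.remove(path)  # mimic `lzma -f`, which replaces the input with the .lzma output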
|
bsd-3-clause
|
sangwook236/sangwook-library
|
python/src/swl/machine_vision/draw_attention.py
|
2
|
7505
|
import math
import tensorflow as tf
#--------------------------------------------------------------------
class DrawAttentionBase(object):
@staticmethod
def filter(img, width, height, Fx, Fy, gamma, patch_width, patch_height):
Fxt = tf.transpose(Fx, perm=[0, 2, 1])
img = tf.reshape(img, [-1, height, width])
glimpse = tf.matmul(Fy, tf.matmul(img, Fxt))
glimpse = tf.reshape(glimpse, [-1, patch_width * patch_height])
return glimpse * tf.reshape(gamma, [-1, 1])
@staticmethod
def linear_transform(x, output_dim):
"""
Affine transformation W * x + b.
Assumes x.shape = (batch_size, num_features).
"""
W = tf.get_variable('W', [x.get_shape()[1], output_dim])
b = tf.get_variable('b', [output_dim], initializer=tf.constant_initializer(0.0))
return tf.matmul(x, W) + b
#--------------------------------------------------------------------
# REF [paper] >> "DRAW: A Recurrent Neural Network For Image Generation", arXiv 2015
# REF [site] >> https://github.com/ericjang/draw
class DrawAttention(DrawAttentionBase):
@staticmethod
def getWriteAttention(ctx, batch_size, width, height, patch_size, reuse=tf.AUTO_REUSE, eps=1.0e-8):
Fx, Fy, gamma = DrawAttention.getAttentionParameters(ctx, width, height, patch_size, 'draw_write_attention', reuse, eps)
with tf.variable_scope('draw_writing_patch', reuse=reuse):
w = DrawAttention.linear_transform(ctx, patch_size * patch_size) # batch_size * (patch_size * patch_size).
w = tf.reshape(w, [batch_size, patch_size, patch_size])
Fyt = tf.transpose(Fy, perm=[0, 2, 1])
wr = tf.matmul(Fyt, tf.matmul(w, Fx))
wr = tf.reshape(wr, [batch_size, height * width])
#gamma = tf.tile(gamma, [1, height * width])
return wr * tf.reshape(1.0 / gamma, [-1, 1])
@staticmethod
def getReadAttention(x, ctx, width, height, patch_size, reuse=tf.AUTO_REUSE, eps=1.0e-8):
Fx, Fy, gamma = DrawAttention.getAttentionParameters(ctx, width, height, patch_size, 'draw_read_attention', reuse, eps)
return DrawAttention.filter(x, width, height, Fx, Fy, gamma, patch_size, patch_size) # batch_size * (patch_size * patch_size).
@staticmethod
def getAttentionParameters(ctx, width, height, patch_size, scope, reuse, eps=1.0e-8):
with tf.variable_scope(scope, reuse=reuse):
params = DrawAttention.linear_transform(ctx, 5) # 5 parameters.
# Grid center (gx, gy), stride (delta), isotropic variance (sigma^2), scalar intensity (gamma).
#gx_tilde, gy_tilde, log_sigma2, log_delta, log_gamma = tf.split(1, 5, params)
gx_tilde, gy_tilde, log_sigma2, log_delta, log_gamma = tf.split(params, 5, 1)
gx = (width + 1) / 2 * (gx_tilde + 1)
gy = (height + 1) / 2 * (gy_tilde + 1)
sigma2 = tf.exp(log_sigma2)
delta = (max(width, height) - 1) / (patch_size - 1) * tf.exp(log_delta) # batch_size * patch_size.
# Attention parameters: Fx, Fy, gamma.
return DrawAttention._filterbank(width, height, gx, gy, sigma2, delta, patch_size, eps) + (tf.exp(log_gamma),)
@staticmethod
def _filterbank(width, height, gx, gy, sigma2, delta, patch_size, eps=1.0e-8):
grid_i = tf.reshape(tf.cast(tf.range(patch_size), tf.float32), [1, -1])
mu_x = gx + (grid_i - patch_size / 2 - 0.5) * delta # Eqn 19.
mu_y = gy + (grid_i - patch_size / 2 - 0.5) * delta # Eqn 20.
a = tf.reshape(tf.cast(tf.range(width), tf.float32), [1, 1, -1])
b = tf.reshape(tf.cast(tf.range(height), tf.float32), [1, 1, -1])
mu_x = tf.reshape(mu_x, [-1, patch_size, 1])
mu_y = tf.reshape(mu_y, [-1, patch_size, 1])
sigma2 = tf.reshape(sigma2, [-1, 1, 1])
Fx = tf.exp(-tf.square(a - mu_x) / (2 * sigma2))
Fy = tf.exp(-tf.square(b - mu_y) / (2 * sigma2)) # batch_size * patch_size * height.
# Normalize, sum over width and height dims.
Fx = Fx / tf.maximum(tf.reduce_sum(Fx, 2, keepdims=True), eps)
Fy = Fy / tf.maximum(tf.reduce_sum(Fy, 2, keepdims=True), eps)
return Fx, Fy
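# --- Editorial note: the grid and filterbank built above follow the DRAW paper
# (Gregor et al., 2015). For reference, in the paper's notation (N = patch_size,
# A = width, B = height):
#   \mu_X^i = g_X + (i - N/2 - 0.5)\,\delta        (Eqn 19)
#   \mu_Y^j = g_Y + (j - N/2 - 0.5)\,\delta        (Eqn 20)
#   F_X[i,a] = \frac{1}{Z_X}\exp\left(-\frac{(a-\mu_X^i)^2}{2\sigma^2}\right)
#   F_Y[j,b] = \frac{1}{Z_Y}\exp\left(-\frac{(b-\mu_Y^j)^2}{2\sigma^2}\right)
# with Z_X, Z_Y chosen so each filter row sums to one, i.e. the
# tf.reduce_sum(..., keepdims=True) division in _filterbank() above.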
#--------------------------------------------------------------------
# REF [paper] >> "End-to-End Instance Segmentation with Recurrent Attention", arXiv 2017
# REF [site] >> https://github.com/renmengye/rec-attend-public
class DrawRectangularAttention(DrawAttentionBase):
@staticmethod
def getWriteAttention(ctx, batch_size, width, height, patch_width, patch_height, reuse=tf.AUTO_REUSE, eps=1.0e-8):
Fx, Fy, gamma = DrawRectangularAttention.getAttentionParameters(ctx, width, height, patch_width, patch_height, 'draw_write_attention', reuse, eps)
with tf.variable_scope('draw_writing_patch', reuse=reuse):
w = DrawRectangularAttention.linear_transform(ctx, patch_width * patch_height) # batch_size * (patch_width * patch_height).
w = tf.reshape(w, [batch_size, patch_height, patch_width])
Fyt = tf.transpose(Fy, perm=[0, 2, 1])
wr = tf.matmul(Fyt, tf.matmul(w, Fx))
wr = tf.reshape(wr, [batch_size, height * width])
#gamma = tf.tile(gamma, [1, height * width])
return wr * tf.reshape(1.0 / gamma, [-1, 1])
@staticmethod
def getReadAttention(x, ctx, width, height, patch_width, patch_height, reuse=tf.AUTO_REUSE, eps=1.0e-8):
Fx, Fy, gamma = DrawRectangularAttention.getAttentionParameters(ctx, width, height, patch_width, patch_height, 'draw_read_attention', reuse, eps)
return DrawRectangularAttention.filter(x, width, height, Fx, Fy, gamma, patch_width, patch_height) # batch_size * (patch_width * patch_height).
@staticmethod
def getAttentionParameters(ctx, width, height, patch_width, patch_height, scope, reuse, eps=1.0e-8):
with tf.variable_scope(scope, reuse=reuse):
params = DrawRectangularAttention.linear_transform(ctx, 7) # 7 parameters.
# Grid center (gx, gy), stride (deltax, deltay), anisotropic variance (sigmax^2, sigmay^2), scalar intensity (gamma).
#gx_tilde, gy_tilde, log_sigmax2, log_sigmay2, log_deltax, log_deltay, log_gamma = tf.split(1, 7, params)
gx_tilde, gy_tilde, log_sigmax2, log_sigmay2, log_deltax, log_deltay, log_gamma = tf.split(params, 7, 1)
gx = (gx_tilde + 1) * width / 2
gy = (gy_tilde + 1) * height / 2
sigmax2 = tf.exp(log_sigmax2)
sigmay2 = tf.exp(log_sigmay2)
deltax = tf.exp(log_deltax) * width # batch_size * patch_width.
deltay = tf.exp(log_deltay) * height # batch_size * patch_height.
# Attention parameters: Fx, Fy, gamma.
return DrawRectangularAttention._filterbank(width, height, gx, gy, sigmax2, sigmay2, deltax, deltay, patch_width, patch_height, eps) + (tf.exp(log_gamma),)
@staticmethod
def _filterbank(width, height, gx, gy, sigmax2, sigmay2, deltax, deltay, patch_width, patch_height, eps=1.0e-8):
grid_ix = tf.reshape(tf.cast(tf.range(patch_width), tf.float32), [1, -1])
grid_iy = tf.reshape(tf.cast(tf.range(patch_height), tf.float32), [1, -1])
mu_x = gx + (deltax + 1) * (grid_ix - patch_width / 2 + 0.5) / patch_width
mu_y = gy + (deltay + 1) * (grid_iy - patch_height / 2 + 0.5) / patch_height
a = tf.reshape(tf.cast(tf.range(width), tf.float32), [1, 1, -1])
b = tf.reshape(tf.cast(tf.range(height), tf.float32), [1, 1, -1])
mu_x = tf.reshape(mu_x, [-1, patch_width, 1])
mu_y = tf.reshape(mu_y, [-1, patch_height, 1])
sigmax2 = tf.reshape(sigmax2, [-1, 1, 1])
sigmay2 = tf.reshape(sigmay2, [-1, 1, 1])
Fx = tf.exp(-tf.square(a - mu_x) / (2 * sigmax2)) / (math.sqrt(2 * math.pi) * tf.sqrt(sigmax2)) # batch_size * patch_width * width.
Fy = tf.exp(-tf.square(b - mu_y) / (2 * sigmay2)) / (math.sqrt(2 * math.pi) * tf.sqrt(sigmay2)) # batch_size * patch_height * height.
# Normalize, sum over width and height dims.
Fx = Fx / tf.maximum(tf.reduce_sum(Fx, 2, keepdims=True), eps)
Fy = Fy / tf.maximum(tf.reduce_sum(Fy, 2, keepdims=True), eps)
return Fx, Fy
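# --- Editorial sketch (illustration only; not used by the classes above) ---
# A tiny NumPy version of the square-patch filterbank, handy for checking the
# shapes and the row normalisation outside of a TensorFlow session. The
# parameter values below are made up for the example.
def _numpy_filterbank_demo(width=28, height=28, patch_size=5, gx=14.0, gy=14.0, sigma2=4.0, delta=3.0, eps=1.0e-8):
    import numpy as np
    grid = np.arange(patch_size, dtype=np.float32).reshape(1, -1)
    mu_x = gx + (grid - patch_size / 2 - 0.5) * delta  # Eqn 19.
    mu_y = gy + (grid - patch_size / 2 - 0.5) * delta  # Eqn 20.
    a = np.arange(width, dtype=np.float32).reshape(1, 1, -1)
    b = np.arange(height, dtype=np.float32).reshape(1, 1, -1)
    Fx = np.exp(-np.square(a - mu_x.reshape(-1, patch_size, 1)) / (2 * sigma2))
    Fy = np.exp(-np.square(b - mu_y.reshape(-1, patch_size, 1)) / (2 * sigma2))
    Fx /= np.maximum(Fx.sum(axis=2, keepdims=True), eps)  # each row now sums to ~1.
    Fy /= np.maximum(Fy.sum(axis=2, keepdims=True), eps)
    return Fx, Fy  # shapes: (1, patch_size, width) and (1, patch_size, height).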
|
gpl-2.0
|
openstack/cinder
|
cinder/tests/unit/fake_constants.py
|
2
|
5220
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
ACTION_FAILED_ID = 'f26f181d-7891-4720-b022-b074ec1733ef'
ACTION2_FAILED_ID = '02f53bd8-3514-485b-ba60-2722ef09c016'
ALREADY_EXISTS_ID = '8f7495fe-5e44-4f33-81af-4b28e9b2952f'
ATTACHMENT_ID = '4dc3bb12-ad75-41b9-ab2c-7609e743e600'
ATTACHMENT2_ID = 'ac2439fe-c071-468f-94e3-547bedb95de0'
BACKUP_ID = '707844eb-6d8a-4ac1-8b98-618e1c0b3a3a'
BACKUP2_ID = '40e8462a-c9d8-462f-a810-b732a1790535'
BACKUP3_ID = '30ae7641-017e-4221-a642-855687c8bd71'
BACKUP4_ID = '23f8605b-8273-4f49-9b3d-1eeca81a63c2'
BACKUP5_ID = '50c97b22-51ea-440b-8d01-ded20a55d7e0'
CGSNAPSHOT_ID = '5e34cce3-bc97-46b7-a127-5cfb95ef445d'
CGSNAPSHOT_NAME = 'cgsnapshot-5e34cce3-bc97-46b7-a127-5cfb95ef445d'
CGSNAPSHOT2_ID = '5c36d762-d6ba-4f04-bd07-88a298cc410a'
CGSNAPSHOT3_ID = '5f392156-fc03-492a-9cb8-e46a7eedaf33'
CONSISTENCY_GROUP_ID = 'f18abf73-79ee-4f2b-8d4f-1c044148f117'
CONSISTENCY_GROUP2_ID = '8afc8952-9dce-4228-9f8a-706c5cb5fc82'
ENCRYPTION_KEY_ID = 'e8387001-745d-45d0-9e4e-0473815ef09a'
ENCRYPTION_KEY2_ID = 'fa0dc8ce-79a4-4162-846f-c731b99f3113'
ENCRYPTION_TYPE_ID = 'af2ae9b8-f40a-4cbc-9f51-b54eb5469405'
IMAGE_ID = 'e79161cd-5f9d-4007-8823-81a807a64332'
INSTANCE_ID = 'fa617131-cdbc-45dc-afff-f21f17ae054e'
IN_USE_ID = '8ee42073-4ac2-4099-8c7a-d416630e6aee'
INVALID_ID = 'f45dcab0-ff2a-46ec-b3b7-74d6f4bb0027'
KEY_ID = '9112ecec-fb9d-4299-a948-ffb52650a5b5'
OBJECT_ID = 'd7c5b12f-d57d-4762-99ab-db5f62ae3569'
OBJECT2_ID = '51f5b8fa-c13c-48ba-8c9d-b470466cbc9c'
OBJECT3_ID = '7bf5ffa9-18a2-4b64-aab4-0798b53ee4e7'
PROJECT_ID = '89afd400-b646-4bbc-b12b-c0a4d63e5bd3'
PROJECT2_ID = '452ebfbc-55d9-402a-87af-65061916c24b'
PROJECT3_ID = 'f6c912d7-bf30-4b12-af81-a9e0b2f85f85'
DOMAIN_ID = 'e747b880-4565-4d18-b8e2-310bdec83759'
PROVIDER_ID = '60087173-e899-470a-9e3a-ba4cffa3e3e3'
PROVIDER2_ID = '1060eccd-64bb-4ed2-86ce-aeaf135a97b8'
PROVIDER3_ID = '63736819-1c95-440e-a873-b9d685afede5'
PROVIDER4_ID = '7db06e02-26b6-4282-945d-7f6c9347a7b0'
QOS_SPEC_ID = 'fc0f7527-79d7-44be-a4f6-3b24db8e11ac'
QOS_SPEC2_ID = 'c561b69d-98d9-478c-815b-6de11f5a09c9'
QOS_SPEC3_ID = '6034720b-f586-4302-a1eb-fe30672069f6'
RAISE_ID = 'a56762e1-4a30-4008-b997-5a438ec9c457'
REQUEST_ID = '253c2a22-931e-4104-a9ab-1d70071e4bd4'
SNAPSHOT_ID = '253b2878-ec60-4793-ad19-e65496ec7aab'
SNAPSHOT_NAME = 'snapshot-253b2878-ec60-4793-ad19-e65496ec7aab'
SNAPSHOT2_ID = 'c02c44fa-5665-4a26-9e66-2ebaf25e5d2d'
SNAPSHOT3_ID = '454f9970-1e05-4193-a3ed-5c390c3faa18'
UPDATE_FAILED_ID = '110b29df-5e0f-4dbb-840c-ef5963d06933'
USER_ID = 'c853ca26-e8ea-4797-8a52-ee124a013d0e'
USER2_ID = '95f7b7ed-bd7f-426e-b05f-f1ffeb4f09df'
USER3_ID = '5f590c70-7f2b-4240-a9b2-a37d343e2a63'
VOLUME_ID = '1e5177e7-95e5-4a0f-b170-e45f4b469f6a'
VOLUME_NAME = 'volume-1e5177e7-95e5-4a0f-b170-e45f4b469f6a'
VOLUME2_ID = '43a09914-e495-475f-b862-0bda3c8918e4'
VOLUME2_NAME = 'volume-43a09914-e495-475f-b862-0bda3c8918e4'
VOLUME3_ID = '1b1cf149-219c-44ac-aee3-13121a7f86a7'
VOLUME3_NAME = 'volume-1b1cf149-219c-44ac-aee3-13121a7f86a7'
VOLUME4_ID = '904d4602-4301-4e9b-8df1-8133b51904e6'
VOLUME4_NAME = 'volume-904d4602-4301-4e9b-8df1-8133b51904e6'
VOLUME5_ID = '17b0e01d-3d2d-4c31-a1aa-c962420bc3dc'
VOLUME5_NAME = 'volume-17b0e01d-3d2d-4c31-a1aa-c962420bc3dc'
VOLUME6_ID = '84375761-46e0-4df2-a567-02f0113428d7'
VOLUME7_ID = '4d6722d1-fafb-455c-9a1c-bc542841c408'
VOLUME8_ID = '439965c7-2ce5-4dff-81fe-549007b2b9da'
VOLUME9_ID = '9bcc62a8-d407-4711-8471-8b9010ae10a3'
VOLUME_NAME_ID = 'ee73d33c-52ed-4cb7-a8a9-2687c1205c22'
VOLUME2_NAME_ID = '63fbdd21-03bc-4309-b867-2893848f86af'
VOLUME_TYPE_ID = '4e9e6d23-eed0-426d-b90a-28f87a94b6fe'
VOLUME_TYPE_NAME = 'vol_type_name'
VOLUME_TYPE2_ID = 'c4daaf47-c530-4901-b28e-f5f0a359c4e6'
VOLUME_TYPE3_ID = 'a3d55d15-eeb1-4816-ada9-bf82decc09b3'
VOLUME_TYPE4_ID = '69943076-754d-4da8-8718-0b0117e9cab1'
VOLUME_TYPE5_ID = '1c450d81-8aab-459e-b338-a6569139b835'
WILL_NOT_BE_FOUND_ID = 'ce816f65-c5aa-46d6-bd62-5272752d584a'
GROUP_TYPE_ID = '29514915-5208-46ab-9ece-1cc4688ad0c1'
GROUP_TYPE2_ID = 'f8645498-1323-47a2-9442-5c57724d2e3c'
GROUP_TYPE3_ID = '1b7915f4-b899-4510-9eff-bd67508c3334'
GROUP_ID = '9a965cc6-ee3a-468d-a721-cebb193f696f'
GROUP2_ID = '40a85639-abc3-4461-9230-b131abd8ee07'
GROUP3_ID = '1078414b-380c-474c-bf76-57e2c235841c'
GROUP_SNAPSHOT_ID = '1e2ab152-44f0-11e6-819f-000c29d19d84'
GROUP_SNAPSHOT2_ID = '33e2ff04-44f0-11e6-819f-000c29d19d84'
# I don't care what it's used for, I just want a damn UUID
UUID1 = '84d0c5f7-2349-401c-8672-f76214d13cab'
UUID2 = '25406d50-e645-4e62-a9ef-1f53f9cba13f'
UUID3 = '29c80662-3a9f-4844-a585-55cd3cd180b5'
UUID4 = '4cd72b2b-5a4f-4f24-93dc-7c0212002916'
UUID5 = '0a574d83-cacf-42b9-8f9f-8f4faa6d4746'
|
apache-2.0
|
ntuecon/server
|
pyenv/Lib/site-packages/twisted/internet/error.py
|
15
|
12645
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Exceptions and errors for use in twisted.internet modules.
"""
from __future__ import division, absolute_import
import socket
from twisted.python import deprecate
from incremental import Version
class BindError(Exception):
"""An error occurred binding to an interface"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class CannotListenError(BindError):
"""
This gets raised by a call to startListening, when the object cannot
start listening.
@ivar interface: the interface I tried to listen on
@ivar port: the port I tried to listen on
@ivar socketError: the exception I got when I tried to listen
@type socketError: L{socket.error}
"""
def __init__(self, interface, port, socketError):
BindError.__init__(self, interface, port, socketError)
self.interface = interface
self.port = port
self.socketError = socketError
def __str__(self):
iface = self.interface or 'any'
return "Couldn't listen on %s:%s: %s." % (iface, self.port,
self.socketError)
class MulticastJoinError(Exception):
"""
An attempt to join a multicast group failed.
"""
class MessageLengthError(Exception):
"""Message is too long to send"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class DNSLookupError(IOError):
"""DNS lookup failed"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class ConnectInProgressError(Exception):
"""A connect operation was started and isn't done yet."""
# connection errors
class ConnectError(Exception):
"""An error occurred while connecting"""
def __init__(self, osError=None, string=""):
self.osError = osError
Exception.__init__(self, string)
def __str__(self):
s = self.__doc__ or self.__class__.__name__
if self.osError:
s = '%s: %s' % (s, self.osError)
if self.args[0]:
s = '%s: %s' % (s, self.args[0])
s = '%s.' % s
return s
class ConnectBindError(ConnectError):
"""Couldn't bind"""
class UnknownHostError(ConnectError):
"""Hostname couldn't be looked up"""
class NoRouteError(ConnectError):
"""No route to host"""
class ConnectionRefusedError(ConnectError):
"""Connection was refused by other side"""
class TCPTimedOutError(ConnectError):
"""TCP connection timed out"""
class BadFileError(ConnectError):
"""File used for UNIX socket is no good"""
class ServiceNameUnknownError(ConnectError):
"""Service name given as port is unknown"""
class UserError(ConnectError):
"""User aborted connection"""
class TimeoutError(UserError):
"""User timeout caused connection failure"""
class SSLError(ConnectError):
"""An SSL error occurred"""
class VerifyError(Exception):
"""Could not verify something that was supposed to be signed.
"""
class PeerVerifyError(VerifyError):
"""The peer rejected our verify error.
"""
class CertificateError(Exception):
"""
We did not find a certificate where we expected to find one.
"""
try:
import errno
errnoMapping = {
errno.ENETUNREACH: NoRouteError,
errno.ECONNREFUSED: ConnectionRefusedError,
errno.ETIMEDOUT: TCPTimedOutError,
}
if hasattr(errno, "WSAECONNREFUSED"):
errnoMapping[errno.WSAECONNREFUSED] = ConnectionRefusedError
errnoMapping[errno.WSAENETUNREACH] = NoRouteError
except ImportError:
errnoMapping = {}
def getConnectError(e):
"""Given a socket exception, return connection error."""
if isinstance(e, Exception):
args = e.args
else:
args = e
try:
number, string = args
except ValueError:
return ConnectError(string=e)
if hasattr(socket, 'gaierror') and isinstance(e, socket.gaierror):
# Only works in 2.2 and newer. Really that means always; #5978 covers
# this and other weirdnesses in this function.
klass = UnknownHostError
else:
klass = errnoMapping.get(number, ConnectError)
return klass(number, string)
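# Editorial illustration (added for reference, not part of Twisted's API docs):
# with the mapping above, getConnectError(socket.error(errno.ECONNREFUSED,
# 'Connection refused')) yields a ConnectionRefusedError instance, while an
# errno that is not present in errnoMapping falls back to the generic
# ConnectError.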
class ConnectionClosed(Exception):
"""
Connection was closed, whether cleanly or non-cleanly.
"""
class ConnectionLost(ConnectionClosed):
"""Connection to the other side was lost in a non-clean fashion"""
def __str__(self):
s = self.__doc__.strip().splitlines()[0]
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class ConnectionAborted(ConnectionLost):
"""
Connection was aborted locally, using
L{twisted.internet.interfaces.ITCPTransport.abortConnection}.
@since: 11.1
"""
class ConnectionDone(ConnectionClosed):
"""Connection was closed cleanly"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class FileDescriptorOverrun(ConnectionLost):
"""
A mis-use of L{IUNIXTransport.sendFileDescriptor} caused the connection to
be closed.
Each file descriptor sent using C{sendFileDescriptor} must be associated
with at least one byte sent using L{ITransport.write}. If at any point
fewer bytes have been written than file descriptors have been sent, the
connection is closed with this exception.
"""
class ConnectionFdescWentAway(ConnectionLost):
"""Uh""" #TODO
class AlreadyCalled(ValueError):
"""Tried to cancel an already-called event"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class AlreadyCancelled(ValueError):
"""Tried to cancel an already-cancelled event"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class PotentialZombieWarning(Warning):
"""
Emitted when L{IReactorProcess.spawnProcess} is called in a way which may
result in termination of the created child process not being reported.
Deprecated in Twisted 10.0.
"""
MESSAGE = (
"spawnProcess called, but the SIGCHLD handler is not "
"installed. This probably means you have not yet "
"called reactor.run, or called "
"reactor.run(installSignalHandler=0). You will probably "
"never see this process finish, and it may become a "
"zombie process.")
deprecate.deprecatedModuleAttribute(
Version("Twisted", 10, 0, 0),
"There is no longer any potential for zombie process.",
__name__,
"PotentialZombieWarning")
class ProcessDone(ConnectionDone):
"""A process has ended without apparent errors"""
def __init__(self, status):
Exception.__init__(self, "process finished with exit code 0")
self.exitCode = 0
self.signal = None
self.status = status
class ProcessTerminated(ConnectionLost):
"""
A process has ended with a probable error condition
@ivar exitCode: See L{__init__}
@ivar signal: See L{__init__}
@ivar status: See L{__init__}
"""
def __init__(self, exitCode=None, signal=None, status=None):
"""
@param exitCode: The exit status of the process. This is roughly like
the value you might pass to L{os.exit}. This is L{None} if the
process exited due to a signal.
@type exitCode: L{int} or L{None}
@param signal: The exit signal of the process. This is L{None} if the
process did not exit due to a signal.
@type signal: L{int} or L{None}
@param status: The exit code of the process. This is a platform
specific combination of the exit code and the exit signal. See
L{os.WIFEXITED} and related functions.
@type status: L{int}
"""
self.exitCode = exitCode
self.signal = signal
self.status = status
s = "process ended"
if exitCode is not None: s = s + " with exit code %s" % exitCode
if signal is not None: s = s + " by signal %s" % signal
Exception.__init__(self, s)
class ProcessExitedAlready(Exception):
"""
The process has already exited and the operation requested can no longer
be performed.
"""
class NotConnectingError(RuntimeError):
"""The Connector was not connecting when it was asked to stop connecting"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class NotListeningError(RuntimeError):
"""The Port was not listening when it was asked to stop listening"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class ReactorNotRunning(RuntimeError):
"""
Error raised when trying to stop a reactor which is not running.
"""
class ReactorNotRestartable(RuntimeError):
"""
Error raised when trying to run a reactor which was stopped.
"""
class ReactorAlreadyRunning(RuntimeError):
"""
Error raised when trying to start the reactor multiple times.
"""
class ReactorAlreadyInstalledError(AssertionError):
"""
Could not install reactor because one is already installed.
"""
class ConnectingCancelledError(Exception):
"""
An C{Exception} that will be raised when an L{IStreamClientEndpoint} is
cancelled before it connects.
@ivar address: The L{IAddress} that is the destination of the
cancelled L{IStreamClientEndpoint}.
"""
def __init__(self, address):
"""
@param address: The L{IAddress} that is the destination of the
L{IStreamClientEndpoint} that was cancelled.
"""
Exception.__init__(self, address)
self.address = address
class NoProtocol(Exception):
"""
An C{Exception} that will be raised when the factory given to a
L{IStreamClientEndpoint} returns L{None} from C{buildProtocol}.
"""
class UnsupportedAddressFamily(Exception):
"""
An attempt was made to use a socket with an address family (eg I{AF_INET},
I{AF_INET6}, etc) which is not supported by the reactor.
"""
class UnsupportedSocketType(Exception):
"""
An attempt was made to use a socket of a type (eg I{SOCK_STREAM},
I{SOCK_DGRAM}, etc) which is not supported by the reactor.
"""
class AlreadyListened(Exception):
"""
An attempt was made to listen on a file descriptor which can only be
listened on once.
"""
class InvalidAddressError(ValueError):
"""
An invalid address was specified (i.e. neither IPv4 nor IPv6, or expected
one and got the other).
@ivar address: See L{__init__}
@ivar message: See L{__init__}
"""
def __init__(self, address, message):
"""
@param address: The address that was provided.
@type address: L{bytes}
@param message: A native string of additional information provided by
the calling context.
@type message: L{str}
"""
self.address = address
self.message = message
__all__ = [
'BindError', 'CannotListenError', 'MulticastJoinError',
'MessageLengthError', 'DNSLookupError', 'ConnectInProgressError',
'ConnectError', 'ConnectBindError', 'UnknownHostError', 'NoRouteError',
'ConnectionRefusedError', 'TCPTimedOutError', 'BadFileError',
'ServiceNameUnknownError', 'UserError', 'TimeoutError', 'SSLError',
'VerifyError', 'PeerVerifyError', 'CertificateError',
'getConnectError', 'ConnectionClosed', 'ConnectionLost',
'ConnectionDone', 'ConnectionFdescWentAway', 'AlreadyCalled',
'AlreadyCancelled', 'PotentialZombieWarning', 'ProcessDone',
'ProcessTerminated', 'ProcessExitedAlready', 'NotConnectingError',
'NotListeningError', 'ReactorNotRunning', 'ReactorAlreadyRunning',
'ReactorAlreadyInstalledError', 'ConnectingCancelledError',
'UnsupportedAddressFamily', 'UnsupportedSocketType', 'InvalidAddressError']
|
bsd-3-clause
|
tux-00/ansible
|
lib/ansible/modules/cloud/centurylink/clc_group.py
|
70
|
17220
|
#!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_group
short_description: Create/delete Server Groups at Centurylink Cloud
description:
- Create or delete Server Groups at CenturyLink Cloud
version_added: "2.0"
options:
name:
description:
- The name of the Server Group
required: True
description:
description:
- A description of the Server Group
required: False
parent:
description:
- The parent group of the server group. If parent is not provided, it creates the group at top level.
required: False
location:
description:
- Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
associated with the account
required: False
state:
description:
- Whether to create or delete the group
default: present
choices: ['present', 'absent']
wait:
description:
- Whether to wait for the tasks to finish before returning.
choices: [ True, False ]
default: True
required: False
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
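# --- Editorial note (illustrative only) ---
# The credentials described in the notes above are read from the environment by
# _set_clc_credentials_from_env(), so a typical (hypothetical) setup before
# running a playbook looks like:
#
#   export CLC_V2_API_USERNAME='my.account.login'
#   export CLC_V2_API_PASSWD='my-secret-password'
#   # or, alternatively, token-based access:
#   export CLC_V2_API_TOKEN='<token from https://api.ctl.io/v2/authentication/login>'
#   export CLC_ACCT_ALIAS='MYALIAS'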
EXAMPLES = '''
# Create a Server Group
---
- name: Create Server Group
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create / Verify a Server Group at CenturyLink Cloud
clc_group:
name: My Cool Server Group
parent: Default Group
state: present
register: clc
- name: debug
debug:
var: clc
# Delete a Server Group
---
- name: Delete Server Group
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete / Verify Absent a Server Group at CenturyLink Cloud
clc_group:
name: My Cool Server Group
parent: Default Group
state: absent
register: clc
- name: debug
debug:
var: clc
'''
RETURN = '''
group:
description: The group information
returned: success
type: dict
sample:
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":"2015-07-29T18:52:47Z",
"modifiedBy":"service.wfad",
"modifiedDate":"2015-07-29T18:52:47Z"
},
"customFields":[
],
"description":"test group",
"groups":[
],
"id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
"links":[
{
"href":"/v2/groups/wfad",
"rel":"createGroup",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad",
"rel":"createServer",
"verbs":[
"POST"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"parentGroup"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
"rel":"defaults",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
"rel":"billing"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
"rel":"archiveGroupAction"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
"rel":"statistics"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
"rel":"horizontalAutoscalePolicyMapping",
"verbs":[
"GET",
"PUT",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
}
],
"locationId":"UC1",
"name":"test group",
"status":"active",
"type":"default"
}
'''
__version__ = '${version}'
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcGroup(object):
clc = None
root_group = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.group_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
location = self.module.params.get('location')
group_name = self.module.params.get('name')
parent_name = self.module.params.get('parent')
group_description = self.module.params.get('description')
state = self.module.params.get('state')
self._set_clc_credentials_from_env()
self.group_dict = self._get_group_tree_for_datacenter(
datacenter=location)
if state == "absent":
changed, group, requests = self._ensure_group_is_absent(
group_name=group_name, parent_name=parent_name)
if requests:
self._wait_for_requests_to_complete(requests)
else:
changed, group = self._ensure_group_is_present(
group_name=group_name, parent_name=parent_name, group_description=group_description)
try:
group = group.data
except AttributeError:
group = group_name
self.module.exit_json(changed=changed, group=group)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(required=True),
description=dict(default=None),
parent=dict(default=None),
location=dict(default=None),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=True))
return argument_spec
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_group_is_absent(self, group_name, parent_name):
"""
Ensure that group_name is absent by deleting it if necessary
:param group_name: string - the name of the clc server group to delete
:param parent_name: string - the name of the parent group for group_name
:return: changed, group
"""
changed = False
group = []
results = []
if self._group_exists(group_name=group_name, parent_name=parent_name):
if not self.module.check_mode:
group.append(group_name)
result = self._delete_group(group_name)
results.append(result)
changed = True
return changed, group, results
def _delete_group(self, group_name):
"""
Delete the provided server group
:param group_name: string - the server group to delete
:return: none
"""
response = None
group, parent = self.group_dict.get(group_name)
try:
response = group.Delete()
except CLCException as ex:
self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
group_name, ex.response_text
))
return response
def _ensure_group_is_present(
self,
group_name,
parent_name,
group_description):
"""
Checks to see if a server group exists, creates it if it doesn't.
:param group_name: the name of the group to validate/create
:param parent_name: the name of the parent group for group_name
:param group_description: a short description of the server group (used when creating)
:return: (changed, group) -
changed: Boolean- whether a change was made,
group: A clc group object for the group
"""
assert self.root_group, "Implementation Error: Root Group not set"
parent = parent_name if parent_name is not None else self.root_group.name
description = group_description
changed = False
group = group_name
parent_exists = self._group_exists(group_name=parent, parent_name=None)
child_exists = self._group_exists(
group_name=group_name,
parent_name=parent)
if parent_exists and child_exists:
group, parent = self.group_dict[group_name]
changed = False
elif parent_exists and not child_exists:
if not self.module.check_mode:
group = self._create_group(
group=group,
parent=parent,
description=description)
changed = True
else:
self.module.fail_json(
msg="parent group: " +
parent +
" does not exist")
return changed, group
def _create_group(self, group, parent, description):
"""
Create the provided server group
:param group: clc_sdk.Group - the group to create
:param parent: clc_sdk.Parent - the parent group for {group}
:param description: string - a text description of the group
:return: clc_sdk.Group - the created group
"""
response = None
(parent, grandparent) = self.group_dict[parent]
try:
response = parent.Create(name=group, description=description)
except CLCException as ex:
self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
group, ex.response_text))
return response
def _group_exists(self, group_name, parent_name):
"""
Check to see if a group exists
:param group_name: string - the group to check
:param parent_name: string - the parent of group_name
:return: boolean - whether the group exists
"""
result = False
if group_name in self.group_dict:
(group, parent) = self.group_dict[group_name]
if parent_name is None or parent_name == parent.name:
result = True
return result
def _get_group_tree_for_datacenter(self, datacenter=None):
"""
Walk the tree of groups for a datacenter
:param datacenter: string - the datacenter to walk (ex: 'UC1')
:return: a dictionary of groups and parents
"""
self.root_group = self.clc.v2.Datacenter(
location=datacenter).RootGroup()
return self._walk_groups_recursive(
parent_group=None,
child_group=self.root_group)
def _walk_groups_recursive(self, parent_group, child_group):
"""
Walk a parent-child tree of groups, starting with the provided child group
:param parent_group: clc_sdk.Group - the parent group to start the walk
:param child_group: clc_sdk.Group - the child group to start the walk
:return: a dictionary of groups and parents
"""
result = {str(child_group): (child_group, parent_group)}
groups = child_group.Subgroups().groups
if len(groups) > 0:
for group in groups:
if group.type != 'default':
continue
result.update(self._walk_groups_recursive(child_group, group))
return result
def _wait_for_requests_to_complete(self, requests_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param requests_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in requests_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process group request')
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(
argument_spec=ClcGroup._define_module_argument_spec(),
supports_check_mode=True)
clc_group = ClcGroup(module)
clc_group.process_request()
if __name__ == '__main__':
main()
|
gpl-3.0
|
xevernever/attrition
|
attrition/attrition_problem.py
|
1
|
1089
|
""""Goal 0: Tee Up Project; Why Do We Care?"""
import pandas as pd
import matplotlib.pyplot as plt
## Import Data ##
def load_data():
    """Load the HR attrition dataset."""
    return pd.read_csv('HR_comma_sep.csv')
df = load_data()
#df.head()
#df.dtypes
def display_attrition_level():
""" DV1: Left-or-Stayed (binary variable); Display Attrition Level """
df = load_data()
(stayed_count, left_count) = pd.value_counts(df['left'])
attrition_percentage = round(100 * left_count/(left_count+stayed_count),1)
print("Percentage of employees that have left: {0}%".format(attrition_percentage))
# def separation(var='left'):
# df = load_data();
# if var == 'left':
# return df.query('left==0')
# elif var == 'right':
# return df.query('left==1')
# else:
# return False
def display_histogram_tenure():
""" DV2: Tenure (Years with Company); Distribution """
#%matplotlib inline
df = load_data()
df.time_spend_company.hist()
#df.time_spend_company.plot.hist()
plt.title('Histogram of Tenure With Company')
plt.xlabel('Years With Company')
plt.ylabel('Frequency')
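# --- Editorial addition (hedged): a small driver so the module can be run directly.
# It only calls the two display helpers defined above and assumes the
# 'HR_comma_sep.csv' file referenced in load_data() is present.
if __name__ == '__main__':
    display_attrition_level()
    display_histogram_tenure()
    plt.show()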
|
bsd-3-clause
|
groschovskiy/lerigos_music
|
Server/API/lib/google/protobuf/internal/test_bad_identifiers_pb2.py
|
43
|
5759
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/internal/test_bad_identifiers.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import service as _service
from google.protobuf import service_reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/internal/test_bad_identifiers.proto',
package='protobuf_unittest',
syntax='proto2',
serialized_pb=_b('\n3google/protobuf/internal/test_bad_identifiers.proto\x12\x11protobuf_unittest\"\x1e\n\x12TestBadIdentifiers*\x08\x08\x64\x10\x80\x80\x80\x80\x02\"\x10\n\x0e\x41notherMessage2\x10\n\x0e\x41notherService:;\n\x07message\x12%.protobuf_unittest.TestBadIdentifiers\x18\x64 \x01(\t:\x03\x66oo:>\n\ndescriptor\x12%.protobuf_unittest.TestBadIdentifiers\x18\x65 \x01(\t:\x03\x62\x61r:>\n\nreflection\x12%.protobuf_unittest.TestBadIdentifiers\x18\x66 \x01(\t:\x03\x62\x61z:;\n\x07service\x12%.protobuf_unittest.TestBadIdentifiers\x18g \x01(\t:\x03quxB\x03\x90\x01\x01')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MESSAGE_FIELD_NUMBER = 100
message = _descriptor.FieldDescriptor(
name='message', full_name='protobuf_unittest.message', index=0,
number=100, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("foo").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
DESCRIPTOR_FIELD_NUMBER = 101
descriptor = _descriptor.FieldDescriptor(
name='descriptor', full_name='protobuf_unittest.descriptor', index=1,
number=101, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("bar").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
REFLECTION_FIELD_NUMBER = 102
reflection = _descriptor.FieldDescriptor(
name='reflection', full_name='protobuf_unittest.reflection', index=2,
number=102, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("baz").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
SERVICE_FIELD_NUMBER = 103
service = _descriptor.FieldDescriptor(
name='service', full_name='protobuf_unittest.service', index=3,
number=103, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("qux").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
_TESTBADIDENTIFIERS = _descriptor.Descriptor(
name='TestBadIdentifiers',
full_name='protobuf_unittest.TestBadIdentifiers',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(100, 536870912), ],
oneofs=[
],
serialized_start=74,
serialized_end=104,
)
_ANOTHERMESSAGE = _descriptor.Descriptor(
name='AnotherMessage',
full_name='protobuf_unittest.AnotherMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=106,
serialized_end=122,
)
DESCRIPTOR.message_types_by_name['TestBadIdentifiers'] = _TESTBADIDENTIFIERS
DESCRIPTOR.message_types_by_name['AnotherMessage'] = _ANOTHERMESSAGE
DESCRIPTOR.extensions_by_name['message'] = message
DESCRIPTOR.extensions_by_name['descriptor'] = descriptor
DESCRIPTOR.extensions_by_name['reflection'] = reflection
DESCRIPTOR.extensions_by_name['service'] = service
TestBadIdentifiers = _reflection.GeneratedProtocolMessageType('TestBadIdentifiers', (_message.Message,), dict(
DESCRIPTOR = _TESTBADIDENTIFIERS,
__module__ = 'google.protobuf.internal.test_bad_identifiers_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.TestBadIdentifiers)
))
_sym_db.RegisterMessage(TestBadIdentifiers)
AnotherMessage = _reflection.GeneratedProtocolMessageType('AnotherMessage', (_message.Message,), dict(
DESCRIPTOR = _ANOTHERMESSAGE,
__module__ = 'google.protobuf.internal.test_bad_identifiers_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest.AnotherMessage)
))
_sym_db.RegisterMessage(AnotherMessage)
TestBadIdentifiers.RegisterExtension(message)
TestBadIdentifiers.RegisterExtension(descriptor)
TestBadIdentifiers.RegisterExtension(reflection)
TestBadIdentifiers.RegisterExtension(service)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\220\001\001'))
_ANOTHERSERVICE = _descriptor.ServiceDescriptor(
name='AnotherService',
full_name='protobuf_unittest.AnotherService',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=124,
serialized_end=140,
methods=[
])
AnotherService = service_reflection.GeneratedServiceType('AnotherService', (_service.Service,), dict(
DESCRIPTOR = _ANOTHERSERVICE,
__module__ = 'google.protobuf.internal.test_bad_identifiers_pb2'
))
AnotherService_Stub = service_reflection.GeneratedServiceStubType('AnotherService_Stub', (AnotherService,), dict(
DESCRIPTOR = _ANOTHERSERVICE,
__module__ = 'google.protobuf.internal.test_bad_identifiers_pb2'
))
# @@protoc_insertion_point(module_scope)
|
apache-2.0
|
nilbody/h2o-3
|
h2o-docs-theme/demo_docs/source/conf.py
|
32
|
8221
|
# -*- coding: utf-8 -*-
#
# Sphinx RTD theme demo documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 3 11:56:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('./test_py_module'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Math
mathjax_path = "http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'H<sub>2</sub>O Documentation'
copyright = u'2013, 0xdata, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# 'sticky_navigation' : True # Set to False to disable the sticky nav while scrolling.
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["../.."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SphinxRTDthemedemodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'SphinxRTDthemedemo.tex', u'Sphinx RTD theme demo Documentation',
u'Dave Snider', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sphinxrtdthemedemo', u'Sphinx RTD theme demo Documentation',
[u'Dave Snider'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SphinxRTDthemedemo', u'Sphinx RTD theme demo Documentation',
u'Dave Snider', 'SphinxRTDthemedemo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
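# --- Editorial note (illustrative): with this conf.py on the Sphinx path, the demo
# docs are typically built with something like
#     sphinx-build -b html <source dir containing this conf.py> <output dir>
# which resolves html_theme = 'sphinx_rtd_theme' via the html_theme_path set above.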
|
apache-2.0
|
sudosurootdev/external_chromium_org
|
native_client_sdk/src/build_tools/tests/sdktools_commands_test.py
|
76
|
18779
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import re
import tarfile
import tempfile
import unittest
from sdktools_test import SdkToolsTestCase
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
TOOLS_DIR = os.path.join(os.path.dirname(BUILD_TOOLS_DIR), 'tools')
sys.path.extend([BUILD_TOOLS_DIR, TOOLS_DIR])
import manifest_util
import oshelpers
class TestCommands(SdkToolsTestCase):
def setUp(self):
self.SetupDefault()
def _AddDummyBundle(self, manifest, bundle_name):
bundle = manifest_util.Bundle(bundle_name)
bundle.revision = 1337
bundle.version = 23
bundle.description = bundle_name
bundle.stability = 'beta'
bundle.recommended = 'no'
bundle.repath = bundle_name
archive = self._MakeDummyArchive(bundle_name)
bundle.AddArchive(archive)
manifest.SetBundle(bundle)
# Need to get the bundle from the manifest -- it doesn't use the one we
# gave it.
return manifest.GetBundle(bundle_name)
def _MakeDummyArchive(self, bundle_name, tarname=None, filename='dummy.txt'):
tarname = (tarname or bundle_name) + '.tar.bz2'
temp_dir = tempfile.mkdtemp(prefix='archive')
try:
dummy_path = os.path.join(temp_dir, filename)
with open(dummy_path, 'w') as stream:
stream.write('Dummy stuff for %s' % bundle_name)
# Build the tarfile directly into the server's directory.
tar_path = os.path.join(self.basedir, tarname)
tarstream = tarfile.open(tar_path, 'w:bz2')
try:
tarstream.add(dummy_path, os.path.join(bundle_name, filename))
finally:
tarstream.close()
with open(tar_path, 'rb') as archive_stream:
sha1, size = manifest_util.DownloadAndComputeHash(archive_stream)
archive = manifest_util.Archive(manifest_util.GetHostOS())
archive.url = self.server.GetURL(os.path.basename(tar_path))
archive.size = size
archive.checksum = sha1
return archive
finally:
oshelpers.Remove(['-rf', temp_dir])
def testInfoBasic(self):
"""The info command should display information about the given bundle."""
self._WriteManifest()
output = self._Run(['info', 'sdk_tools'])
# Make sure basic information is there
bundle = self.manifest.GetBundle('sdk_tools')
archive = bundle.GetHostOSArchive()
self.assertTrue(bundle.name in output)
self.assertTrue(bundle.description in output)
self.assertTrue(str(bundle.revision) in output)
self.assertTrue(str(archive.size) in output)
self.assertTrue(archive.checksum in output)
self.assertTrue(bundle.stability in output)
def testInfoUnknownBundle(self):
"""The info command should notify the user of unknown bundles."""
self._WriteManifest()
bogus_bundle = 'foobar'
output = self._Run(['info', bogus_bundle])
self.assertTrue(re.search(r'[uU]nknown', output))
self.assertTrue(bogus_bundle in output)
def testInfoMultipleBundles(self):
"""The info command should support listing multiple bundles."""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._AddDummyBundle(self.manifest, 'pepper_24')
self._WriteManifest()
output = self._Run(['info', 'pepper_23', 'pepper_24'])
self.assertTrue('pepper_23' in output)
self.assertTrue('pepper_24' in output)
self.assertFalse(re.search(r'[uU]nknown', output))
def testInfoMultipleArchives(self):
"""The info command should display multiple archives."""
bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
archive2 = self._MakeDummyArchive('pepper_26', tarname='pepper_26_more',
filename='dummy2.txt')
archive2.host_os = 'all'
bundle.AddArchive(archive2)
self._WriteManifest()
output = self._Run(['info', 'pepper_26'])
self.assertTrue('pepper_26' in output)
self.assertTrue('pepper_26_more' in output)
def testListBasic(self):
"""The list command should display basic information about remote
bundles."""
self._WriteManifest()
output = self._Run(['list'])
self.assertTrue(re.search('I.*?sdk_tools.*?stable', output, re.MULTILINE))
# This line is important (it's used by the updater to determine if the
# sdk_tools bundle needs to be updated), so let's be explicit.
self.assertTrue('All installed bundles are up-to-date.' in output)
def testListMultiple(self):
"""The list command should display multiple bundles."""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
output = self._Run(['list'])
# Added pepper_23 to the remote manifest not the local manifest, so it
# shouldn't be installed.
self.assertTrue(re.search('^[^I]*pepper_23', output, re.MULTILINE))
self.assertTrue('sdk_tools' in output)
def testListWithRevision(self):
"""The list command should display the revision, if desired."""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
output = self._Run(['list', '-r'])
self.assertTrue(re.search('pepper_23.*?r1337', output))
def testListWithUpdatedRevision(self):
"""The list command should display when there is an update available."""
p23bundle = self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteCacheManifest(self.manifest)
# Modify the remote manifest to have a newer revision.
p23bundle.revision += 1
self._WriteManifest()
output = self._Run(['list', '-r'])
# We should see a display like this: I* pepper_23 (r1337 -> r1338)
# The star indicates the bundle has an update.
self.assertTrue(re.search(r'I\*\s+pepper_23.*?r1337.*?r1338', output))
def testListLocalVersionNotOnRemote(self):
"""The list command should tell the user if they have a bundle installed
that doesn't exist in the remote manifest."""
self._WriteManifest()
p23bundle = self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteCacheManifest(self.manifest)
output = self._Run(['list', '-r'])
message = 'Bundles installed locally that are not available remotely:'
message_loc = output.find(message)
self.assertNotEqual(message_loc, -1)
# Make sure pepper_23 is listed after the message above.
self.assertTrue('pepper_23' in output[message_loc:])
def testSources(self):
"""The sources command should allow adding/listing/removing of sources.
When a source is added, it will provide an additional set of bundles."""
other_manifest = manifest_util.SDKManifest()
self._AddDummyBundle(other_manifest, 'naclmono_23')
with open(os.path.join(self.basedir, 'source.json'), 'w') as stream:
stream.write(other_manifest.GetDataAsString())
source_json_url = self.server.GetURL('source.json')
self._WriteManifest()
output = self._Run(['sources', '--list'])
self.assertTrue('No external sources installed.' in output)
output = self._Run(['sources', '--add', source_json_url])
output = self._Run(['sources', '--list'])
self.assertTrue(source_json_url in output)
# Should be able to get info about that bundle.
output = self._Run(['info', 'naclmono_23'])
self.assertTrue('Unknown bundle' not in output)
self._Run(['sources', '--remove', source_json_url])
output = self._Run(['sources', '--list'])
self.assertTrue('No external sources installed.' in output)
def testUpdateBasic(self):
"""The update command should install the contents of a bundle to the SDK."""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
self._Run(['update', 'pepper_23'])
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')))
def testUpdateInCacheButDirectoryRemoved(self):
"""The update command should update if the bundle directory does not exist,
even if the bundle is already in the cache manifest."""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteCacheManifest(self.manifest)
self._WriteManifest()
self._Run(['update', 'pepper_23'])
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')))
def testUpdateNoNewVersion(self):
"""The update command should do nothing if the bundle is already up-to-date.
"""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
self._Run(['update', 'pepper_23'])
output = self._Run(['update', 'pepper_23'])
self.assertTrue('is already up-to-date.' in output)
def testUpdateWithNewVersion(self):
"""The update command should update to a new version if it exists."""
bundle = self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
self._Run(['update', 'pepper_23'])
bundle.revision += 1
self._WriteManifest()
output = self._Run(['update', 'pepper_23'])
self.assertTrue('already exists, but has an update available' in output)
# Now update using --force.
output = self._Run(['update', 'pepper_23', '--force'])
self.assertTrue('Updating bundle' in output)
cache_manifest = self._ReadCacheManifest()
num_archives = len(cache_manifest.GetBundle('pepper_23').GetArchives())
self.assertEqual(num_archives, 1)
def testUpdateUnknownBundles(self):
"""The update command should ignore unknown bundles and notify the user."""
self._WriteManifest()
output = self._Run(['update', 'foobar'])
self.assertTrue('unknown bundle' in output)
def testUpdateRecommended(self):
"""The update command should update only recommended bundles when run
without args.
"""
bundle_25 = self._AddDummyBundle(self.manifest, 'pepper_25')
bundle_25.recommended = 'no'
bundle_26 = self._AddDummyBundle(self.manifest, 'pepper_26')
bundle_26.recommended = 'yes'
self._WriteManifest()
output = self._Run(['update'])
# Should not try to update sdk_tools (even though it is recommended)
self.assertTrue('Ignoring manual update request.' not in output)
self.assertFalse(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_25')))
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_26', 'dummy.txt')))
def testUpdateCanary(self):
"""The update command should create the correct directory name for repath'd
bundles.
"""
bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
bundle.name = 'pepper_canary'
self._WriteManifest()
output = self._Run(['update', 'pepper_canary'])
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_canary', 'dummy.txt')))
def testUpdateMultiArchive(self):
"""The update command should include download/untar multiple archives
specified in the bundle.
"""
bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
archive2 = self._MakeDummyArchive('pepper_26', tarname='pepper_26_more',
filename='dummy2.txt')
archive2.host_os = 'all'
bundle.AddArchive(archive2)
self._WriteManifest()
output = self._Run(['update', 'pepper_26'])
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_26', 'dummy.txt')))
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_26', 'dummy2.txt')))
def testUpdateBadSize(self):
"""If an archive has a bad size, print an error.
"""
bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
archive = bundle.GetHostOSArchive()
archive.size = -1
self._WriteManifest()
stdout = self._Run(['update', 'pepper_26'], expect_error=True)
self.assertTrue('Size mismatch' in stdout)
def testUpdateBadSHA(self):
"""If an archive has a bad SHA, print an error.
"""
bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
archive = bundle.GetHostOSArchive()
archive.checksum = 0
self._WriteManifest()
stdout = self._Run(['update', 'pepper_26'], expect_error=True)
self.assertTrue('SHA1 checksum mismatch' in stdout)
def testUninstall(self):
"""The uninstall command should remove the installed bundle, if it
exists.
"""
# First install the bundle.
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
output = self._Run(['update', 'pepper_23'])
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')))
# Now remove it.
self._Run(['uninstall', 'pepper_23'])
self.assertFalse(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_23')))
# The bundle should not be marked as installed.
output = self._Run(['list'])
self.assertTrue(re.search('^[^I]*pepper_23', output, re.MULTILINE))
def testReinstall(self):
"""The reinstall command should remove, then install, the specified
bundles.
"""
# First install the bundle.
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
output = self._Run(['update', 'pepper_23'])
dummy_txt = os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')
self.assertTrue(os.path.exists(dummy_txt))
with open(dummy_txt) as f:
self.assertEqual(f.read(), 'Dummy stuff for pepper_23')
# Change some files.
foo_txt = os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'foo.txt')
with open(foo_txt, 'w') as f:
f.write('Another dummy file. This one is not part of the bundle.')
with open(dummy_txt, 'w') as f:
f.write('changed dummy.txt')
# Reinstall the bundle.
self._Run(['reinstall', 'pepper_23'])
self.assertFalse(os.path.exists(foo_txt))
self.assertTrue(os.path.exists(dummy_txt))
with open(dummy_txt) as f:
self.assertEqual(f.read(), 'Dummy stuff for pepper_23')
cache_manifest = self._ReadCacheManifest()
num_archives = len(cache_manifest.GetBundle('pepper_23').GetArchives())
self.assertEqual(num_archives, 1)
def testReinstallWithDuplicatedArchives(self):
"""The reinstall command should only use the most recent archive if there
are duplicated archives.
NOTE: There was a bug where the sdk_cache/naclsdk_manifest2.json file was
duplicating archives from different revisions. Make sure that reinstall
ignores old archives in the bundle.
"""
# First install the bundle.
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
self._Run(['update', 'pepper_23'])
manifest = self._ReadCacheManifest()
bundle = manifest.GetBundle('pepper_23')
self.assertEqual(len(bundle.GetArchives()), 1)
# Now add a bogus duplicate archive
archive2 = self._MakeDummyArchive('pepper_23', tarname='pepper_23',
filename='dummy2.txt')
bundle.AddArchive(archive2)
self._WriteCacheManifest(manifest)
output = self._Run(['reinstall', 'pepper_23'])
# When updating just one file, there is no (file 1/2 - "...") output.
self.assertFalse('file 1/' in output)
# Should be using the last archive.
self.assertFalse(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')))
self.assertTrue(os.path.exists(
os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy2.txt')))
def testReinstallDoesntUpdate(self):
"""The reinstall command should not update a bundle that has an update."""
# First install the bundle.
bundle = self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
self._Run(['update', 'pepper_23'])
dummy_txt = os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')
self.assertTrue(os.path.exists(dummy_txt))
with open(dummy_txt) as f:
self.assertEqual(f.read(), 'Dummy stuff for pepper_23')
# Update the revision.
bundle.revision += 1
self._WriteManifest()
# Change the file.
foo_txt = os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'foo.txt')
with open(dummy_txt, 'w') as f:
f.write('changed dummy.txt')
# Reinstall.
self._Run(['reinstall', 'pepper_23'])
# The data has been reinstalled.
self.assertTrue(os.path.exists(dummy_txt))
with open(dummy_txt) as f:
self.assertEqual(f.read(), 'Dummy stuff for pepper_23')
# ... but the version hasn't been updated.
output = self._Run(['list', '-r'])
self.assertTrue(re.search(r'I\*\s+pepper_23.*?r1337.*?r1338', output))
def testArchiveCacheBasic(self):
"""Downloaded archives should be stored in the cache by default."""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteManifest()
self._Run(['update', 'pepper_23'])
archive_cache = os.path.join(self.cache_dir, 'archives')
cache_contents = os.listdir(archive_cache)
self.assertEqual(cache_contents, ['pepper_23'])
cache_contents = os.listdir(os.path.join(archive_cache, 'pepper_23'))
self.assertEqual(cache_contents, ['pepper_23.tar.bz2'])
def testArchiveCacheEviction(self):
archive_cache = os.path.join(self.cache_dir, 'archives')
self._AddDummyBundle(self.manifest, 'pepper_23')
self._AddDummyBundle(self.manifest, 'pepper_22')
self._WriteManifest()
# First install pepper_23
self._Run(['update', 'pepper_23'])
archive = os.path.join(archive_cache, 'pepper_23', 'pepper_23.tar.bz2')
archive_size = os.path.getsize(archive)
# Set the mtime on the pepper_23 bundle to be a few seconds in the past.
# This is needed so that the two bundles don't end up with the same
# timestamp which can happen on systems that don't report sub-second
# timestamps.
atime = os.path.getatime(archive)
mtime = os.path.getmtime(archive)
os.utime(archive, (atime, mtime-10))
# Set cache limit to size of pepper archive * 1.5
self._WriteConfig('{ "cache_max": %d }' % int(archive_size * 1.5))
# Now install pepper_22, which should cause pepper_23 to be evicted
self._Run(['update', 'pepper_22'])
cache_contents = os.listdir(archive_cache)
self.assertEqual(cache_contents, ['pepper_22'])
def testArchiveCacheZero(self):
"""Archives should not be cached when cache_max is zero."""
self._AddDummyBundle(self.manifest, 'pepper_23')
self._WriteConfig('{ "cache_max": 0 }')
self._WriteManifest()
self._Run(['update', 'pepper_23'])
archive_cache = os.path.join(self.cache_dir, 'archives')
# Archive folder should be completely removed by cache cleanup
self.assertFalse(os.path.exists(archive_cache))
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
ishanic/scikit-learn
|
examples/classification/plot_digits_classification.py
|
289
|
2397
|
"""
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
|
bsd-3-clause
|
geoscixyz/em_examples
|
em_examples/InductionSphereTEM.py
|
1
|
19333
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
import scipy as sp
import scipy.special  # needed for sp.special.erfc below
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
from matplotlib.path import Path
import matplotlib.patches as patches
##############################################
# PLOTTING FUNCTIONS FOR WIDGETS
##############################################
def fcn_TDEM_InductionSpherePlaneWidget(xtx,ytx,ztx,m,orient,x0,y0,z0,a,sig,mur,xrx,yrx,zrx,logt,Comp,Type):
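# Widget callback: conductivity and time arrive log10-scaled; the anomaly is
# mapped over an XY grid at the receiver height and the full decay curve is
# plotted at the receiver location.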
sig = 10**sig
t = 10**logt
if Type == 'B':
Type = 'b'
elif Type == 'dB/dt':
Type = 'dbdt'
tvec = np.logspace(-6,0,31)
xmin, xmax, dx, ymin, ymax, dy = -30., 30., 0.3, -30., 30., 0.4
X,Y = np.mgrid[xmin:xmax+dx:dx, ymin:ymax+dy:dy]
X = np.transpose(X)
Y = np.transpose(Y)
Obj = SphereTEM(m,orient,xtx,ytx,ztx)
Bx,By,Bz,Babs = Obj.fcn_ComputeTimeResponse(t,sig,mur,a,x0,y0,z0,X,Y,zrx,Type)
Bxi,Byi,Bzi,Babsi = Obj.fcn_ComputeTimeResponse(tvec,sig,mur,a,x0,y0,z0,xrx,yrx,zrx,Type)
fig1 = plt.figure(figsize=(17,6))
Ax1 = fig1.add_axes([0.04,0,0.43,1])
Ax2 = fig1.add_axes([0.6,0,0.4,1])
if Comp == 'x':
Ax1 = plotAnomalyXYplane(Ax1,t,X,Y,ztx,Bx,Comp,Type)
Ax1 = plotPlaceTxRxSphereXY(Ax1,xtx,ytx,xrx,yrx,x0,y0,a)
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,Comp,Type)
elif Comp == 'y':
Ax1 = plotAnomalyXYplane(Ax1,t,X,Y,ztx,By,Comp,Type)
Ax1 = plotPlaceTxRxSphereXY(Ax1,xtx,ytx,xrx,yrx,x0,y0,a)
Ax2 = plotResponseTEM(Ax2,t,tvec,Byi,Comp,Type)
elif Comp == 'z':
Ax1 = plotAnomalyXYplane(Ax1,t,X,Y,ztx,Bz,Comp,Type)
Ax1 = plotPlaceTxRxSphereXY(Ax1,xtx,ytx,xrx,yrx,x0,y0,a)
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,Comp,Type)
elif Comp == 'abs':
Ax1 = plotAnomalyXYplane(Ax1,t,X,Y,ztx,Babs,Comp,Type)
Ax1 = plotPlaceTxRxSphereXY(Ax1,xtx,ytx,xrx,yrx,x0,y0,a)
Ax2 = plotResponseTEM(Ax2,t,tvec,Babsi,Comp,Type)
plt.show(fig1)
def fcn_TDEM_InductionSphereProfileWidget(xtx,ztx,m,orient,x0,z0,a,sig,mur,xrx,zrx,logt,Flag):
sig = 10**sig
t = 10**logt
if orient == "Vert. Coaxial":
orient = 'x'
elif orient == "Horiz. Coplanar":
orient = 'z'
if Flag == 'dBs/dt':
Type = 'dbdt'
else:
Type = 'b'
# Same global functions can be used but with ytx, y0, yrx, Y = 0.
tvec = np.logspace(-6,0,31)
xmin, xmax, dx, zmin, zmax, dz = -30., 30., 0.3, -40., 20., 0.4
X,Z = np.mgrid[xmin:xmax+dx:dx, zmin:zmax+dz:dz]
X = np.transpose(X)
Z = np.transpose(Z)
Obj = SphereTEM(m,orient,xtx,0.,ztx)
Bxi,Byi,Bzi,Babsi = Obj.fcn_ComputeTimeResponse(tvec,sig,mur,a,x0,0.,z0,xrx,0.,zrx,Type)
Hxt,Hyt,Hzt = fcn_ComputePrimary(m,orient,xtx,0.,ztx,x0,0.,z0)
fig1 = plt.figure(figsize=(17,6))
Ax1 = fig1.add_axes([0.04,0,0.38,1])
Ax2 = fig1.add_axes([0.6,0,0.4,1])
Ax1 = plotProfileTxRxSphere(Ax1,xtx,ztx,x0,z0,a,xrx,zrx,X,Z,orient)
if Flag == 'Bp':
Hpx,Hpy,Hpz = fcn_ComputePrimary(m,orient,xtx,0.,ztx,X,0.,Z)
Ax1 = plotProfileTxRxArrow(Ax1,x0,z0,Hxt,Hzt,Flag)
Ax1 = plotProfileXZplane(Ax1,X,Z,Hpx,Hpz,Flag)
elif Flag == 'Bs':
Bx,By,Bz,Babs = Obj.fcn_ComputeTimeResponse(t,sig,mur,a,x0,0.,z0,X,0.,Z,Type)
Chi = fcn_ComputeExcitation_TEM(t,sig,mur,a,Type)
Ax1 = plotProfileTxRxArrow(Ax1,x0,z0,Chi*Hxt,Chi*Hzt,Flag)
Ax1 = plotProfileXZplane(Ax1,X,Z,Bx,Bz,Flag)
elif Flag == 'dBs/dt':
Bx,By,Bz,Babs = Obj.fcn_ComputeTimeResponse(t,sig,mur,a,x0,0.,z0,X,0.,Z,Type)
Chi = fcn_ComputeExcitation_TEM(t,sig,mur,a,Type)
Ax1 = plotProfileTxRxArrow(Ax1,x0,z0,Chi*Hxt,Chi*Hzt,Flag)
Ax1 = plotProfileXZplane(Ax1,X,Z,Bx,Bz,Flag)
if (orient == 'x') & (Flag == 'Bp'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,orient,Type)
elif (orient == 'z') & (Flag == 'Bp'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,orient,Type)
elif (orient == 'x') & (Flag == 'Bs'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,orient,Type)
elif (orient == 'z') & (Flag == 'Bs'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,orient,Type)
elif (orient == 'x') & (Flag == 'dBs/dt'):
Type = 'dbdt'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,orient,Type)
elif (orient == 'z') & (Flag == 'dBs/dt'):
Type = 'dbdt'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,orient,Type)
plt.show(fig1)
def fcn_TDEM_InductionSphereProfileEM61Widget(xtx,ztx,L,m,orient,x0,z0,a,sig,mur,logt,Flag):
xtx = xtx - L/2
xrx = xtx + L
zrx = ztx
sig = 10**sig
t = 10**logt
if orient == "Vert. Coaxial":
orient = 'x'
elif orient == "Horiz. Coplanar":
orient = 'z'
if Flag == 'dBs/dt':
Type = 'dbdt'
else:
Type = 'b'
# Same global functions can be used but with ytx, y0, yrx, Y = 0.
tvec = np.logspace(-6,0,31)
xmin, xmax, dx, zmin, zmax, dz = -30., 30., 0.3, -40., 20., 0.4
X,Z = np.mgrid[xmin:xmax+dx:dx, zmin:zmax+dz:dz]
X = np.transpose(X)
Z = np.transpose(Z)
Obj = SphereTEM(m,orient,xtx,0.,ztx)
Bxi,Byi,Bzi,Babsi = Obj.fcn_ComputeTimeResponse(tvec,sig,mur,a,x0,0.,z0,xrx,0.,zrx,Type)
Hxt,Hyt,Hzt = fcn_ComputePrimary(m,orient,xtx,0.,ztx,x0,0.,z0)
fig1 = plt.figure(figsize=(17,6))
Ax1 = fig1.add_axes([0.04,0,0.38,1])
Ax2 = fig1.add_axes([0.6,0,0.4,1])
Ax1 = plotProfileTxRxSphere(Ax1,xtx,ztx,x0,z0,a,xrx,zrx,X,Z,orient)
if Flag == 'Bp':
Hpx,Hpy,Hpz = fcn_ComputePrimary(m,orient,xtx,0.,ztx,X,0.,Z)
Ax1 = plotProfileTxRxArrow(Ax1,x0,z0,Hxt,Hzt,Flag)
Ax1 = plotProfileXZplane(Ax1,X,Z,Hpx,Hpz,Flag)
elif Flag == 'Bs':
Bx,By,Bz,Babs = Obj.fcn_ComputeTimeResponse(t,sig,mur,a,x0,0.,z0,X,0.,Z,'b')
Chi = fcn_ComputeExcitation_TEM(t,sig,mur,a,Type)
Ax1 = plotProfileTxRxArrow(Ax1,x0,z0,Chi*Hxt,Chi*Hzt,Flag)
Ax1 = plotProfileXZplane(Ax1,X,Z,Bx,Bz,Flag)
elif Flag == 'dBs/dt':
Bx,By,Bz,Babs = Obj.fcn_ComputeTimeResponse(t,sig,mur,a,x0,0.,z0,X,0.,Z,'dbdt')
Chi = fcn_ComputeExcitation_TEM(t,sig,mur,a,Type)
Ax1 = plotProfileTxRxArrow(Ax1,x0,z0,Chi*Hxt,Chi*Hzt,Flag)
Ax1 = plotProfileXZplane(Ax1,X,Z,Bx,Bz,Flag)
if (orient == 'x') & (Flag == 'Bp'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,orient,Type)
elif (orient == 'z') & (Flag == 'Bp'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,orient,Type)
elif (orient == 'x') & (Flag == 'Bs'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,orient,Type)
elif (orient == 'z') & (Flag == 'Bs'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,orient,Type)
elif (orient == 'x') & (Flag == 'dBs/dt'):
Type = 'dbdt'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,orient,Type)
elif (orient == 'z') & (Flag == 'dBs/dt'):
Type = 'dbdt'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,orient,Type)
plt.show(fig1)
##############################################
# GLOBAL FUNCTIONS
##############################################
def fcn_ComputeExcitation_TEM(t,sig,mur,a,Type):
"""Compute Excitation Factor (TEM)"""
beta = np.sqrt(4*np.pi*1e-7*sig) * a
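# beta**2 = mu0*sigma*a**2 (mu0 = 4*pi*1e-7) is the characteristic diffusion
# time of the sphere; the series terms below are scaled by it.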
N = 2000
nvec = np.linspace(1,N,N)
if mur < 1.01:
chi = np.zeros(np.size(t))
if Type == 'b':
if np.size(t) == 1:
SUM_1 = np.sum(np.exp(-(nvec*beta)**2/t))
SUM_2 = np.sum(nvec*sp.special.erfc(nvec*beta/np.sqrt(t)))
chi = (9/2)*(1/3 + t/beta**2 - (2/beta)*np.sqrt(t/np.pi)*(1 + 2*SUM_1) + 4*SUM_2)
else:
for tt in range(0,np.size(t)):
SUM_1 = np.sum(np.exp(-(nvec*beta)**2/t[tt]))
SUM_2 = np.sum(nvec*sp.special.erfc(nvec*beta/np.sqrt(t[tt])))
chi[tt] = (9/2)*(1/3 + t[tt]/beta**2 - (2/beta)*np.sqrt(t[tt]/np.pi)*(1 + 2*SUM_1) + 4*SUM_2)
elif Type == 'dbdt':
if np.size(t) == 1:
SUM = np.sum(np.exp(-(nvec*beta)**2/t))
chi = (9/2)*(1/beta**2 - (1/(beta*np.sqrt(np.pi*t)))*(1 + 2*SUM))
else:
for tt in range(0,np.size(t)):
SUM = np.sum(np.exp(-(nvec*beta)**2/t[tt]))
chi[tt] = (9/2)*(1/beta**2 - (1/(beta*np.sqrt(np.pi*t[tt])))*(1 + 2*SUM))
else:
N = 2000 # Coefficients
eta = np.pi * (np.linspace(1,N,N) + 1/4)
eta0 = np.pi * np.linspace(1,N,N)
# Converge eta coefficients
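# Fixed-point iteration: the converged eta_n satisfy
# tan(eta) = (mur-1)*eta/(mur-1+eta**2), the roots that set the decay
# spectrum of the permeable sphere.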
for pp in range (0,10):
eta = eta0 + np.arctan((mur - 1)*eta/(mur - 1 + eta**2))
chi = np.zeros(np.size(t))
# Get Excitation Factor
if Type == 'b':
if np.size(t) == 1:
chi = (9*mur)*np.sum( np.exp(-t*(eta/beta)**2)/((mur+2)*(mur-1) + eta**2) )
else:
for tt in range(0,np.size(t)):
chi[tt] = (9*mur)*np.sum( np.exp(-t[tt]*(eta/beta)**2)/((mur+2)*(mur-1) + eta**2) )
elif Type == 'dbdt':
if np.size(t) == 1:
chi = -(9*mur)*np.sum( eta**2*np.exp(-t*(eta/beta)**2)/(beta**2*((mur+2)*(mur-1) + eta**2)) )
else:
for tt in range(0,np.size(t)):
chi[tt] = -(9*mur)*np.sum( eta**2*np.exp(-t[tt]*(eta/beta)**2)/(beta**2*((mur+2)*(mur-1) + eta**2)) )
return chi
def fcn_ComputePrimary(m,orient,xtx,ytx,ztx,X,Y,Z):
"""Computes Inducing Field at Sphere"""
R = np.sqrt((X-xtx)**2 + (Y-ytx)**2 + (Z-ztx)**2)
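# Free-space magnetic dipole field H = (1/(4*pi))*(3*(m.r)*r/R**5 - m/R**3),
# written out per transmitter orientation in the branches below.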
if orient == "x":
Hpx = (1/(4*np.pi))*(3*m*(X-xtx)*(X-xtx)/R**5 - m/R**3)
Hpy = (1/(4*np.pi))*(3*m*(Y-ytx)*(X-xtx)/R**5)
Hpz = (1/(4*np.pi))*(3*m*(Z-ztx)*(X-xtx)/R**5)
elif orient == "y":
Hpx = (1/(4*np.pi))*(3*m*(X-xtx)*(Y-ytx)/R**5)
Hpy = (1/(4*np.pi))*(3*m*(Y-ytx)*(Y-ytx)/R**5 - m/R**3)
Hpz = (1/(4*np.pi))*(3*m*(Z-ztx)*(Y-ytx)/R**5)
elif orient == "z":
Hpx = (1/(4*np.pi))*(3*m*(X-xtx)*(Z-ztx)/R**5)
Hpy = (1/(4*np.pi))*(3*m*(Y-ytx)*(Z-ztx)/R**5)
Hpz = (1/(4*np.pi))*(3*m*(Z-ztx)*(Z-ztx)/R**5 - m/R**3)
return Hpx, Hpy, Hpz
##############################################
# GLOBAL PLOTTING FUNCTIONS
##############################################
def plotAnomalyXYplane(Ax,t,X,Y,Z,B,Comp,Type):
FS = 20
tol = 1e5
Sign = np.sign(B)
B = 1e9*np.abs(B) # convert to nT or nT/s
MAX = np.max(B)
B = np.log10(tol*B/MAX)
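# Signed-log colour scale: plot sign(B)*log10(|B|) over ~5 decades below the
# field maximum; values below that threshold are clipped to zero below.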
Sign[B<0] = 0.
B[B<0] = 0.
Cmap = 'RdYlBu'
#Cmap = 'seismic_r'
if Comp == 'abs':
TickLabels = MAX*np.array([1.,1e-1,1e-2,1e-3,1e-4,0.,-1e-4,-1e-3,-1e-2,-1e-1,-1])
TickLabels = ["%.1e" % x for x in TickLabels]
Cplot = Ax.contourf(X,Y,Sign*B,50,cmap=Cmap, vmin=-5, vmax=5)
cbar = plt.colorbar(Cplot, ax=Ax, pad=0.02, ticks=-np.linspace(-5,5,11))
else:
TickLabels = MAX*np.array([-1.,-1e-1,-1e-2,-1e-3,-1e-4,0.,1e-4,1e-3,1e-2,1e-1,1])
TickLabels = ["%.1e" % x for x in TickLabels]
Cplot = Ax.contourf(X,Y,Sign*B,50,cmap=Cmap, vmin=-5, vmax=5)
cbar = plt.colorbar(Cplot, ax=Ax, pad=0.02, ticks=np.linspace(-5,5,11))
if Comp == 'x' and Type == 'b':
cbar.set_label('[nT]', rotation=270, labelpad = 25, size=FS+4)
Ax.set_title("$\mathbf{Bx}$",fontsize=FS+6)
elif Comp == 'y' and Type == 'b':
cbar.set_label('[nT]', rotation=270, labelpad = 25, size=FS+4)
Ax.set_title("$\mathbf{By}$",fontsize=FS+6)
elif Comp == 'z' and Type == 'b':
cbar.set_label('[nT]', rotation=270, labelpad = 25, size=FS+4)
Ax.set_title("$\mathbf{Bz}$",fontsize=FS+6)
elif Comp == 'x' and Type == 'dbdt':
cbar.set_label('[nT/s]', rotation=270, labelpad = 25, size=FS+4)
Ax.set_title("$\mathbf{dBx/dt}$",fontsize=FS+6)
elif Comp == 'y' and Type == 'dbdt':
cbar.set_label('[nT/s]', rotation=270, labelpad = 25, size=FS+4)
Ax.set_title("$\mathbf{dBy/dt}$",fontsize=FS+6)
elif Comp == 'z' and Type == 'dbdt':
cbar.set_label('[nT/s]', rotation=270, labelpad = 25, size=FS+4)
Ax.set_title("$\mathbf{dBz/dt}$",fontsize=FS+6)
cbar.set_ticklabels(TickLabels)
cbar.ax.tick_params(labelsize=FS-2)
Ax.set_xbound(np.min(X),np.max(X))
Ax.set_ybound(np.min(Y),np.max(Y))
Ax.set_xlabel('X [m]',fontsize=FS+2)
Ax.set_ylabel('Y [m]',fontsize=FS+2,labelpad=-10)
Ax.tick_params(labelsize=FS-2)
return Ax
def plotPlaceTxRxSphereXY(Ax,xtx,ytx,xrx,yrx,x0,y0,a):
Xlim = Ax.get_xlim()
Ylim = Ax.get_ylim()
FS = 20
Ax.scatter(xtx,ytx,s=100,color='k')
Ax.text(xtx-0.75,ytx+1.5,'$\mathbf{Tx}$',fontsize=FS+6)
Ax.scatter(xrx,yrx,s=100,color='k')
Ax.text(xrx-0.75,yrx-4,'$\mathbf{Rx}$',fontsize=FS+6)
xs = x0 + a*np.cos(np.linspace(0,2*np.pi,41))
ys = y0 + a*np.sin(np.linspace(0,2*np.pi,41))
Ax.plot(xs,ys,ls=':',color='k',linewidth=3)
Ax.set_xbound(Xlim)
Ax.set_ybound(Ylim)
return Ax
def plotResponseTEM(Ax,ti,t,B,Comp,Type):
FS = 20
B = 1e9*np.abs(B) # convert to nT or nT/s; loglog cannot plot negative values
if Type == 'b':
Ylim = np.array([B[0]/1e3,B[0]])
elif Type == 'dbdt':
Ylim = np.array([B[0]/1e6,B[0]])
B[B < Ylim[0]] = 0.1*Ylim[0]
xTicks = (np.logspace(np.log10(np.min(t)),np.log10(np.max(t)),7))
Ax.grid('both', linestyle='-', linewidth=0.8, color=[0.8, 0.8, 0.8])
Ax.loglog(t,0*t,color='k',linewidth=2)
Ax.loglog(t,B,color='k',linewidth=4)
Ax.loglog(np.array([ti,ti]),1.1*Ylim,linewidth=3,color='r')
Ax.set_xbound(np.min(t),np.max(t))
Ax.set_ybound(1.1*Ylim)
Ax.set_xlabel('Times [s]',fontsize=FS+2)
Ax.tick_params(labelsize=FS-2)
Ax.yaxis.set_major_formatter(FormatStrFormatter('%.1e'))
if Comp == 'x' and Type == 'b':
Ax.set_ylabel('$\mathbf{|Bx|}$ [nT]',fontsize=FS+4,labelpad=-5)
Ax.set_title('$\mathbf{Bx}$ Response at $\mathbf{Rx}$',fontsize=FS+6)
elif Comp == 'z' and Type == 'b':
Ax.set_ylabel('$\mathbf{|Bz|}$ [nT]',fontsize=FS+4,labelpad=-5)
Ax.set_title('$\mathbf{Bz}$ Response at $\mathbf{Rx}$',fontsize=FS+6)
elif Comp == 'x' and Type == 'dbdt':
Ax.set_ylabel('$\mathbf{|dBx/dt|}$ [nT/s]',fontsize=FS+4,labelpad=-5)
Ax.set_title('$\mathbf{dBx/dt}$ Response at $\mathbf{Rx}$',fontsize=FS+6)
elif Comp == 'z' and Type == 'dbdt':
Ax.set_ylabel('$\mathbf{|dBz/dt|}$ [nT/s]',fontsize=FS+4,labelpad=-5)
Ax.set_title('$\mathbf{dBz/dt}$ Response at $\mathbf{Rx}$',fontsize=FS+6)
return Ax
def plotProfileTxRxSphere(Ax,xtx,ztx,x0,z0,a,xrx,zrx,X,Z,orient):
FS = 22
phi = np.linspace(0,2*np.pi,41)
psi = np.linspace(0,np.pi,21)
if orient == 'x':
Xtx = xtx + 0.5*np.cos(phi)
Ztx = ztx + 2*np.sin(phi)
Xrx = xrx + 0.5*np.cos(phi)
Zrx = zrx + 2*np.sin(phi)
elif orient == 'z':
Xtx = xtx + 2*np.cos(phi)
Ztx = ztx + 0.5*np.sin(phi)
Xrx = xrx + 2*np.cos(phi)
Zrx = zrx + 0.5*np.sin(phi)
# Xs = x0 + a*np.cos(psi)
# Zs1 = z0 + a*np.sin(psi)
# Zs2 = z0 - a*np.sin(psi)
XS = x0 + a*np.cos(phi)
ZS = z0 + a*np.sin(phi)
Ax.fill_between(np.array([np.min(X),np.max(X)]),np.array([0.,0.]),np.array([np.max(Z),np.max(Z)]),facecolor=(0.9,0.9,0.9))
Ax.fill_between(np.array([np.min(X),np.max(X)]),np.array([0.,0.]),np.array([np.min(Z),np.min(Z)]),facecolor=(0.6,0.6,0.6),linewidth=2)
# Ax.fill_between(Xs,Zs1,Zs2,facecolor=(0.4,0.4,0.4),linewidth=4)
polyObj = plt.Polygon(np.c_[XS,ZS],closed=True,facecolor=((0.4,0.4,0.4)),edgecolor='k',linewidth=2)
Ax.add_patch(polyObj)
Ax.plot(Xtx,Ztx,'k',linewidth=4)
Ax.plot(Xrx,Zrx,'k',linewidth=4)
# Ax.plot(x0+a*np.cos(phi),z0+a*np.sin(phi),'k',linewidth=2)
Ax.set_xbound(np.min(X),np.max(X))
Ax.set_ybound(np.min(Z),np.max(Z))
Ax.text(xtx-4,ztx+2,'$\mathbf{Tx}$',fontsize=FS)
Ax.text(xrx,zrx+2,'$\mathbf{Rx}$',fontsize=FS)
return Ax
def plotProfileXZplane(Ax,X,Z,Bx,Bz,Flag):
FS = 20
if Flag == 'Bp':
Ax.streamplot(X,Z,Bx,Bz,color='b',linewidth=3.5,arrowsize=2)
Ax.set_title('Primary Field',fontsize=FS+6)
elif Flag == 'Bs':
Ax.streamplot(X,Z,Bx,Bz,color='r',linewidth=3.5,arrowsize=2)
Ax.set_title('Secondary Field',fontsize=FS+6)
elif Flag == 'dBs/dt':
Ax.streamplot(X,Z,Bx,Bz,color='r',linewidth=3.5,arrowsize=2)
Ax.set_title('Secondary Time Derivative',fontsize=FS+6)
Ax.set_xbound(np.min(X),np.max(X))
Ax.set_ybound(np.min(Z),np.max(Z))
Ax.set_xlabel('X [m]',fontsize=FS+2)
Ax.set_ylabel('Z [m]',fontsize=FS+2,labelpad=-10)
Ax.tick_params(labelsize=FS-2)
return Ax
def plotProfileTxRxArrow(Ax,x0,z0,Bxt,Bzt,Flag):
Babst = np.sqrt(Bxt**2 + Bzt**2)
dx = Bxt/Babst
dz = Bzt/Babst
if Flag == 'Bp':
Ax.arrow(x0-2.5*dx, z0-2.75*dz, 3*dx, 3*dz, fc=(0.,0.,0.8), ec="k",head_width=2.5, head_length=2.5,width=1,linewidth=2)
elif Flag == 'Bs':
Ax.arrow(x0-2.5*dx, z0-2.75*dz, 3*dx, 3*dz, fc=(0.8,0.,0.), ec="k",head_width=2.5, head_length=2.5,width=1,linewidth=2)
elif Flag == 'dBs/dt':
Ax.arrow(x0-2.5*dx, z0-2.75*dz, 3*dx, 3*dz, fc=(0.8,0.,0.), ec="k",head_width=2.5, head_length=2.5,width=1,linewidth=2)
return Ax
############################################
# CLASS: SPHERE TOP VIEW
############################################
############################################
# DEFINE CLASS
class SphereTEM():
"""Fucntionwhcihdf
Input variables:
Output variables:
"""
def __init__(self,m,orient,xtx,ytx,ztx):
"""Defines Initial Attributes"""
# INITIALIZES OBJECT
# m: Transmitter dipole moment
# orient: Transmitter dipole orientation 'x', 'y' or 'z'
# xtx: Transmitter x location
# ytx: Transmitter y location
# ztx: Transmitter z location
self.m = m
self.orient = orient
self.xtx = xtx
self.ytx = ytx
self.ztx = ztx
############################################
# DEFINE METHODS
def fcn_ComputeTimeResponse(self,t,sig,mur,a,x0,y0,z0,X,Y,Z,Type):
"""Compute Single Frequency Response at (X,Y,Z) in T or T/s"""
m = self.m
orient = self.orient
xtx = self.xtx
ytx = self.ytx
ztx = self.ztx
chi = fcn_ComputeExcitation_TEM(t,sig,mur,a,Type)
Hpx,Hpy,Hpz = fcn_ComputePrimary(m,orient,xtx,ytx,ztx,x0,y0,z0)
mx = 4*np.pi*a**3*chi*Hpx/3
my = 4*np.pi*a**3*chi*Hpy/3
mz = 4*np.pi*a**3*chi*Hpz/3
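# Induced dipole moment of the sphere: volume (4/3)*pi*a**3 times the
# excitation factor chi times the inducing field at the sphere centre;
# the secondary field below is that of this dipole at the observation points.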
R = np.sqrt((X-x0)**2 + (Y-y0)**2 + (Z-z0)**2)
Bx = (1e-9)*(3*(X-x0)*(mx*(X-x0) + my*(Y-y0) + mz*(Z-z0))/R**5 - mx/R**3)
By = (1e-9)*(3*(Y-y0)*(mx*(X-x0) + my*(Y-y0) + mz*(Z-z0))/R**5 - my/R**3)
Bz = (1e-9)*(3*(Z-z0)*(mx*(X-x0) + my*(Y-y0) + mz*(Z-z0))/R**5 - mz/R**3)
Babs = np.sqrt(Bx**2 + By**2 + Bz**2)
return Bx, By, Bz, Babs
|
mit
|
chdecultot/frappe
|
frappe/email/doctype/newsletter/test_newsletter.py
|
9
|
2953
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, unittest
from frappe.email.doctype.newsletter.newsletter import confirmed_unsubscribe
from six.moves.urllib.parse import unquote
emails = ["[email protected]", "[email protected]",
"[email protected]", "[email protected]"]
class TestNewsletter(unittest.TestCase):
def setUp(self):
frappe.set_user("Administrator")
frappe.db.sql('delete from `tabEmail Group Member`')
for email in emails:
frappe.get_doc({
"doctype": "Email Group Member",
"email": email,
"email_group": "_Test Email Group"
}).insert()
def test_send(self):
name = self.send_newsletter()
email_queue_list = [frappe.get_doc('Email Queue', e.name) for e in frappe.get_all("Email Queue")]
self.assertEqual(len(email_queue_list), 4)
recipients = [e.recipients[0].recipient for e in email_queue_list]
for email in emails:
self.assertTrue(email in recipients)
def test_unsubscribe(self):
# test unsubscribe
name = self.send_newsletter()
from frappe.email.queue import flush
flush(from_test=True)
to_unsubscribe = unquote(frappe.local.flags.signed_query_string.split("email=")[1].split("&")[0])
confirmed_unsubscribe(to_unsubscribe, name)
name = self.send_newsletter()
email_queue_list = [frappe.get_doc('Email Queue', e.name) for e in frappe.get_all("Email Queue")]
self.assertEqual(len(email_queue_list), 3)
recipients = [e.recipients[0].recipient for e in email_queue_list]
for email in emails:
if email != to_unsubscribe:
self.assertTrue(email in recipients)
@staticmethod
def send_newsletter(published=0):
frappe.db.sql("delete from `tabEmail Queue`")
frappe.db.sql("delete from `tabEmail Queue Recipient`")
frappe.db.sql("delete from `tabNewsletter`")
newsletter = frappe.get_doc({
"doctype": "Newsletter",
"subject": "_Test Newsletter",
"send_from": "Test Sender <[email protected]>",
"message": "Testing my news.",
"published": published
}).insert(ignore_permissions=True)
newsletter.append("email_group", {"email_group": "_Test Email Group"})
newsletter.save()
newsletter.send_emails()
return newsletter.name
def test_portal(self):
self.send_newsletter(1)
frappe.set_user("[email protected]")
from frappe.email.doctype.newsletter.newsletter import get_newsletter_list
newsletters = get_newsletter_list("Newsletter", None, None, 0)
self.assertEqual(len(newsletters), 1)
def test_newsletter_context(self):
context = frappe._dict()
newsletter_name = self.send_newsletter(1)
frappe.set_user("[email protected]")
doc = frappe.get_doc("Newsletter", newsletter_name)
doc.get_context(context)
self.assertEqual(context.no_cache, 1)
self.assertTrue("attachments" not in list(context))
test_dependencies = ["Email Group"]
|
mit
|
wilebeast/FireFox-OS
|
B2G/gecko/testing/mochitest/pywebsocket/mod_pywebsocket/headerparserhandler.py
|
36
|
9460
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""PythonHeaderParserHandler for mod_pywebsocket.
Apache HTTP Server and mod_python must be configured such that this
function is called to handle WebSocket requests.
"""
import logging
from mod_python import apache
from mod_pywebsocket import common
from mod_pywebsocket import dispatch
from mod_pywebsocket import handshake
from mod_pywebsocket import util
# PythonOption to specify the handler root directory.
_PYOPT_HANDLER_ROOT = 'mod_pywebsocket.handler_root'
# PythonOption to specify the handler scan directory.
# This must be a directory under the root directory.
# The default is the root directory.
_PYOPT_HANDLER_SCAN = 'mod_pywebsocket.handler_scan'
# PythonOption to allow handlers whose canonical path is
# not under the root directory. It's disallowed by default.
# Set this option with value of 'yes' to allow.
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT = (
'mod_pywebsocket.allow_handlers_outside_root_dir')
# Map from values to their meanings. 'Yes' and 'No' are allowed just for
# compatibility.
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION = {
'off': False, 'no': False, 'on': True, 'yes': True}
# PythonOption to specify to allow draft75 handshake.
# The default is None (Off)
_PYOPT_ALLOW_DRAFT75 = 'mod_pywebsocket.allow_draft75'
# Map from values to their meanings.
_PYOPT_ALLOW_DRAFT75_DEFINITION = {'off': False, 'on': True}
class ApacheLogHandler(logging.Handler):
"""Wrapper logging.Handler to emit log message to apache's error.log."""
_LEVELS = {
logging.DEBUG: apache.APLOG_DEBUG,
logging.INFO: apache.APLOG_INFO,
logging.WARNING: apache.APLOG_WARNING,
logging.ERROR: apache.APLOG_ERR,
logging.CRITICAL: apache.APLOG_CRIT,
}
def __init__(self, request=None):
logging.Handler.__init__(self)
self._log_error = apache.log_error
if request is not None:
self._log_error = request.log_error
# Time and level will be printed by Apache.
self._formatter = logging.Formatter('%(name)s: %(message)s')
def emit(self, record):
apache_level = apache.APLOG_DEBUG
if record.levelno in ApacheLogHandler._LEVELS:
apache_level = ApacheLogHandler._LEVELS[record.levelno]
msg = self._formatter.format(record)
# "server" parameter must be passed to have "level" parameter work.
# If only "level" parameter is passed, nothing shows up on Apache's
# log. However, at this point, we cannot get the server object of the
# virtual host which will process WebSocket requests. The only server
# object we can get here is apache.main_server. But wherever (server
# configuration context or virtual host context) we put
# PythonHeaderParserHandler directive, apache.main_server just points
# the main server instance (not any of virtual server instance). Then,
# Apache follows LogLevel directive in the server configuration context
# to filter logs. So, we need to specify LogLevel in the server
# configuration context. Even if we specify "LogLevel debug" in the
# virtual host context which actually handles WebSocket connections,
# DEBUG level logs never show up unless "LogLevel debug" is specified
# in the server configuration context.
#
# TODO(tyoshino): Provide logging methods on request object. When
# request is mp_request object (when used together with Apache), the
# methods call request.log_error indirectly. When request is
# _StandaloneRequest, the methods call Python's logging facility which
# we create in standalone.py.
self._log_error(msg, apache_level, apache.main_server)
def _configure_logging():
logger = logging.getLogger()
# Logs are filtered by Apache based on LogLevel directive in Apache
# configuration file. We must just pass logs for all levels to
# ApacheLogHandler.
logger.setLevel(logging.DEBUG)
logger.addHandler(ApacheLogHandler())
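# Logging is configured once at import time so that messages emitted while the
# dispatcher is created below reach Apache's error log.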
_configure_logging()
_LOGGER = logging.getLogger(__name__)
def _parse_option(name, value, definition):
if value is None:
return False
meaning = definition.get(value.lower())
if meaning is None:
raise Exception('Invalid value for PythonOption %s: %r' %
(name, value))
return meaning
def _create_dispatcher():
_LOGGER.info('Initializing Dispatcher')
options = apache.main_server.get_options()
handler_root = options.get(_PYOPT_HANDLER_ROOT, None)
if not handler_root:
raise Exception('PythonOption %s is not defined' % _PYOPT_HANDLER_ROOT,
apache.APLOG_ERR)
handler_scan = options.get(_PYOPT_HANDLER_SCAN, handler_root)
allow_handlers_outside_root = _parse_option(
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT,
options.get(_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT),
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)
dispatcher = dispatch.Dispatcher(
handler_root, handler_scan, allow_handlers_outside_root)
for warning in dispatcher.source_warnings():
apache.log_error('mod_pywebsocket: %s' % warning, apache.APLOG_WARNING)
return dispatcher
# Initialize
_dispatcher = _create_dispatcher()
def headerparserhandler(request):
"""Handle request.
Args:
request: mod_python request.
This function is named headerparserhandler because it is the default
name for a PythonHeaderParserHandler.
"""
handshake_is_done = False
try:
# Fallback to default http handler for request paths for which
# we don't have request handlers.
if not _dispatcher.get_handler_suite(request.uri):
request.log_error('No handler for resource: %r' % request.uri,
apache.APLOG_INFO)
request.log_error('Fallback to Apache', apache.APLOG_INFO)
return apache.DECLINED
except dispatch.DispatchException, e:
request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
if not handshake_is_done:
return e.status
try:
allow_draft75 = _parse_option(
_PYOPT_ALLOW_DRAFT75,
apache.main_server.get_options().get(_PYOPT_ALLOW_DRAFT75),
_PYOPT_ALLOW_DRAFT75_DEFINITION)
try:
handshake.do_handshake(
request, _dispatcher, allowDraft75=allow_draft75)
except handshake.VersionException, e:
request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER,
e.supported_versions)
return apache.HTTP_BAD_REQUEST
except handshake.HandshakeException, e:
# Handshake for ws/wss failed.
# Send http response with error status.
request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
return e.status
handshake_is_done = True
request._dispatcher = _dispatcher
_dispatcher.transfer_data(request)
except handshake.AbortedByUserException, e:
request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
except Exception, e:
# DispatchException can also be thrown if something is wrong in
# pywebsocket code. It's caught here, then.
request.log_error('mod_pywebsocket: %s\n%s' %
(e, util.get_stack_trace()),
apache.APLOG_ERR)
# Unknown exceptions before handshake mean Apache must handle its
# request with another handler.
if not handshake_is_done:
return apache.DECLINED
# Set assbackwards to suppress response header generation by Apache.
request.assbackwards = 1
return apache.DONE # Return DONE such that no other handlers are invoked.
# vi:sts=4 sw=4 et
|
apache-2.0
|
Tche333/Hello_World
|
plugin.video.belgium/tvcom.py
|
1
|
1850
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
import re
import channel
class Channel(channel.Channel):
def get_main_url(self):
return 'http://www.tvcom.be'
def get_categories(self):
data = channel.get_url(self.main_url)
regex = r"""missions(.+?)</ul"""
res = re.findall(regex, data)
if not res:
return
cat_data = res[0]
regex = r"""<a href="([^"]+)"><span>([^<]+)"""
for url, name in re.findall(regex, cat_data):
channel.addDir(name, self.icon, channel_id=self.channel_id, url=url, action='show_videos')
def get_videos(self, datas):
url = datas.get('url')
data = channel.get_url(self.main_url + url)
regex = r"""class="contentheading"[^>]+>([^<]+)</td>\s+</tr>\s+</table>\s+<table[^>]+>\s+<tr>\s+<td[^>]+>\s+<p><a href="([^"]+)[^>]+><img.+? src="([^"]+)"""
for title, vurl, img in re.findall(regex, data):
title = title.strip()
vurl = channel.array2url(channel_id=self.channel_id, url=vurl, action='play_video')
channel.addLink(title, vurl, self.main_url + img)
def play_video(self, datas):
url = datas.get('url')
video_page_url = self.main_url + url
data = channel.get_url(video_page_url)
regex = r"""(http://www.tvcom.be/videos/.+?\.mp4)"""
video_url = re.findall(regex, data)[0]
video_url = video_url.replace(' ', '%20')
channel.playUrl(video_url)
if __name__ == "__main__":
import sys
args = sys.argv
if len(args) == 3:
Channel({'channel_id': 'tvcom', 'action': 'play_video', 'url':args[1]})
elif len(args) == 2:
Channel({'channel_id': 'tvcom', 'action': 'show_videos', 'url':args[1]})
else:
Channel({'channel_id': 'tvcom', 'action': 'show_categories'})
|
gpl-2.0
|
qtekfun/htcDesire820Kernel
|
external/chromium_org/chrome/test/chromedriver/test/run_all_tests.py
|
23
|
6672
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all ChromeDriver end to end tests."""
import optparse
import os
import platform
import sys
import tempfile
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(_THIS_DIR, os.pardir))
import archive
import chrome_paths
import util
sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'build', 'android'))
from pylib import constants
def _GenerateTestCommand(script,
chromedriver,
ref_chromedriver=None,
chrome=None,
chrome_version=None,
android_package=None,
verbose=False):
_, log_path = tempfile.mkstemp(prefix='chromedriver_')
print 'chromedriver server log: %s' % log_path
cmd = [
sys.executable,
os.path.join(_THIS_DIR, script),
'--chromedriver=%s' % chromedriver,
'--log-path=%s' % log_path,
]
if ref_chromedriver:
cmd.append('--reference-chromedriver=' + ref_chromedriver)
if chrome:
cmd.append('--chrome=' + chrome)
if chrome_version:
cmd.append('--chrome-version=' + chrome_version)
if verbose:
cmd.append('--verbose')
if android_package:
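# Android runs need a display; wrap the command in xvfb-run to provide a
# virtual X server.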
cmd = ['xvfb-run', '-a'] + cmd
cmd.append('--android-package=' + android_package)
return cmd
def RunPythonTests(chromedriver, ref_chromedriver,
chrome=None, chrome_version=None,
chrome_version_name=None, android_package=None):
version_info = ''
if chrome_version_name:
version_info = '(%s)' % chrome_version_name
util.MarkBuildStepStart('python_tests%s' % version_info)
code = util.RunCommand(
_GenerateTestCommand('run_py_tests.py',
chromedriver,
ref_chromedriver=ref_chromedriver,
chrome=chrome,
chrome_version=chrome_version,
android_package=android_package))
if code:
util.MarkBuildStepError()
return code
def RunJavaTests(chromedriver, chrome=None, chrome_version=None,
chrome_version_name=None, android_package=None,
verbose=False):
version_info = ''
if chrome_version_name:
version_info = '(%s)' % chrome_version_name
util.MarkBuildStepStart('java_tests%s' % version_info)
code = util.RunCommand(
_GenerateTestCommand('run_java_tests.py',
chromedriver,
ref_chromedriver=None,
chrome=chrome,
chrome_version=chrome_version,
android_package=android_package,
verbose=verbose))
if code:
util.MarkBuildStepError()
return code
def RunCppTests(cpp_tests):
util.MarkBuildStepStart('chromedriver_tests')
code = util.RunCommand([cpp_tests])
if code:
util.MarkBuildStepError()
return code
def DownloadChrome(version_name, revision, download_site):
util.MarkBuildStepStart('download %s' % version_name)
return archive.DownloadChrome(revision, util.MakeTempDir(), download_site)
def main():
parser = optparse.OptionParser()
parser.add_option(
'', '--android-packages',
help='Comma separated list of application package names, '
'if running tests on Android.')
# Option 'chrome-version' is for desktop only.
parser.add_option(
'', '--chrome-version',
help='Version of chrome, e.g., \'HEAD\', \'27\', or \'26\'. '
'Default is to run tests against all of these versions. '
'Notice: this option only applies to desktop.')
options, _ = parser.parse_args()
exe_postfix = ''
if util.IsWindows():
exe_postfix = '.exe'
cpp_tests_name = 'chromedriver_tests' + exe_postfix
server_name = 'chromedriver' + exe_postfix
required_build_outputs = [server_name]
if not options.android_packages:
required_build_outputs += [cpp_tests_name]
build_dir = chrome_paths.GetBuildDir(required_build_outputs)
constants.SetBuildType(os.path.basename(build_dir))
print 'Using build outputs from', build_dir
chromedriver = os.path.join(build_dir, server_name)
platform_name = util.GetPlatformName()
if util.IsLinux() and platform.architecture()[0] == '64bit':
platform_name += '64'
ref_chromedriver = os.path.join(
chrome_paths.GetSrc(),
'chrome', 'test', 'chromedriver', 'third_party', 'java_tests',
'reference_builds',
'chromedriver_%s%s' % (platform_name, exe_postfix))
if options.android_packages:
os.environ['PATH'] += os.pathsep + os.path.join(
_THIS_DIR, os.pardir, 'chrome')
code = 0
for package in options.android_packages.split(','):
code1 = RunPythonTests(chromedriver,
ref_chromedriver,
chrome_version_name=package,
android_package=package)
code2 = RunJavaTests(chromedriver,
chrome_version_name=package,
android_package=package,
verbose=True)
code = code or code1 or code2
return code
else:
latest_snapshot_revision = archive.GetLatestRevision(archive.Site.SNAPSHOT)
versions = [
['HEAD', latest_snapshot_revision],
['32', archive.CHROME_32_REVISION],
['31', archive.CHROME_31_REVISION],
['30', archive.CHROME_30_REVISION]
]
code = 0
for version in versions:
if options.chrome_version and version[0] != options.chrome_version:
continue
download_site = archive.Site.CONTINUOUS
version_name = version[0]
if version_name == 'HEAD':
version_name = version[1]
download_site = archive.Site.SNAPSHOT
chrome_path = DownloadChrome(version_name, version[1], download_site)
code1 = RunPythonTests(chromedriver,
ref_chromedriver,
chrome=chrome_path,
chrome_version=version[0],
chrome_version_name='v%s' % version_name)
code2 = RunJavaTests(chromedriver, chrome=chrome_path,
chrome_version=version[0],
chrome_version_name='v%s' % version_name)
code = code or code1 or code2
cpp_tests = os.path.join(build_dir, cpp_tests_name)
return RunCppTests(cpp_tests) or code
if __name__ == '__main__':
sys.exit(main())
|
gpl-2.0
|
KevinMidboe/statusHandler
|
flask/lib/python3.4/site-packages/pip/_vendor/requests/packages/urllib3/util/ssl_.py
|
360
|
11622
|
from __future__ import absolute_import
import errno
import warnings
import hmac
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
SSLContext = None
HAS_SNI = False
create_default_context = None
IS_PYOPENSSL = False
# Maps the length of a digest to a possible hash function producing this digest
HASHFUNC_MAP = {
32: md5,
40: sha1,
64: sha256,
}
def _const_compare_digest_backport(a, b):
"""
Compare two digests of equal length in constant time.
The digests must be of type str/bytes.
Returns True if the digests match, and False otherwise.
"""
result = abs(len(a) - len(b))
for l, r in zip(bytearray(a), bytearray(b)):
result |= l ^ r
return result == 0
_const_compare_digest = getattr(hmac, 'compare_digest',
_const_compare_digest_backport)
try: # Test for SSL features
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
try:
from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
OP_NO_COMPRESSION = 0x20000
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
'!eNULL:!MD5'
)
try:
from ssl import SSLContext # Modern SSL?
except ImportError:
import sys
class SSLContext(object): # Platform-specific: Python 2 & 3.1
supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
(3, 2) <= sys.version_info)
def __init__(self, protocol_version):
self.protocol = protocol_version
# Use default values from a real SSLContext
self.check_hostname = False
self.verify_mode = ssl.CERT_NONE
self.ca_certs = None
self.options = 0
self.certfile = None
self.keyfile = None
self.ciphers = None
def load_cert_chain(self, certfile, keyfile):
self.certfile = certfile
self.keyfile = keyfile
def load_verify_locations(self, cafile=None, capath=None):
self.ca_certs = cafile
if capath is not None:
raise SSLError("CA directories not supported in older Pythons")
def set_ciphers(self, cipher_suite):
if not self.supports_set_ciphers:
raise TypeError(
'Your version of Python does not support setting '
'a custom cipher suite. Please upgrade to Python '
'2.7, 3.2, or later if you need this functionality.'
)
self.ciphers = cipher_suite
def wrap_socket(self, socket, server_hostname=None, server_side=False):
warnings.warn(
'A true SSLContext object is not available. This prevents '
'urllib3 from configuring SSL appropriately and may cause '
'certain SSL connections to fail. You can upgrade to a newer '
'version of Python to solve this. For more information, see '
'https://urllib3.readthedocs.io/en/latest/security.html'
'#insecureplatformwarning.',
InsecurePlatformWarning
)
kwargs = {
'keyfile': self.keyfile,
'certfile': self.certfile,
'ca_certs': self.ca_certs,
'cert_reqs': self.verify_mode,
'ssl_version': self.protocol,
'server_side': server_side,
}
if self.supports_set_ciphers: # Platform-specific: Python 2.7+
return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
else: # Platform-specific: Python 2.6
return wrap_socket(socket, **kwargs)
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
fingerprint = fingerprint.replace(':', '').lower()
digest_length = len(fingerprint)
hashfunc = HASHFUNC_MAP.get(digest_length)
if not hashfunc:
raise SSLError(
'Fingerprint of invalid length: {0}'.format(fingerprint))
# We need encode() here for py32; works on py2 and p33.
fingerprint_bytes = unhexlify(fingerprint.encode())
cert_digest = hashfunc(cert).digest()
if not _const_compare_digest(cert_digest, fingerprint_bytes):
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(fingerprint, hexlify(cert_digest)))
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbreviation.
(So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
like resolve_cert_reqs
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
def create_urllib3_context(ssl_version=None, cert_reqs=None,
options=None, ciphers=None):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
# Setting the default here, as we may have no ssl module on import
cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue #309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
context.set_ciphers(ciphers or DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None, ciphers=None, ssl_context=None,
ca_cert_dir=None):
"""
All arguments except for server_hostname, ssl_context, and ca_cert_dir have
the same meaning as they do when using :func:`ssl.wrap_socket`.
:param server_hostname:
When SNI is supported, the expected hostname of the certificate
:param ssl_context:
A pre-made :class:`SSLContext` object. If none is provided, one will
be created using :func:`create_urllib3_context`.
:param ciphers:
A string of ciphers we wish the client to support. This is not
supported on Python 2.6 as the ssl module does not support it.
:param ca_cert_dir:
A directory containing CA certificates in multiple separate files, as
supported by OpenSSL's -CApath flag or the capath argument to
SSLContext.load_verify_locations().
"""
context = ssl_context
if context is None:
context = create_urllib3_context(ssl_version, cert_reqs,
ciphers=ciphers)
if ca_certs or ca_cert_dir:
try:
context.load_verify_locations(ca_certs, ca_cert_dir)
except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
raise SSLError(e)
# Py33 raises FileNotFoundError which subclasses OSError
# These are not equivalent unless we check the errno attribute
except OSError as e: # Platform-specific: Python 3.3 and beyond
if e.errno == errno.ENOENT:
raise SSLError(e)
raise
if certfile:
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
warnings.warn(
'An HTTPS request has been made, but the SNI (Subject Name '
'Indication) extension to TLS is not available on this platform. '
'This may cause the server to present an incorrect TLS '
'certificate, which can cause validation failures. You can upgrade to '
'a newer version of Python to solve this. For more information, see '
'https://urllib3.readthedocs.io/en/latest/security.html'
'#snimissingwarning.',
SNIMissingWarning
)
return context.wrap_socket(sock)
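# Minimal composition sketch (assumptions: `raw_sock` is a connected TCP socket
# and `ca_bundle` is a path to a PEM bundle; neither is defined in this module):
#   ctx = create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
#   tls_sock = ssl_wrap_socket(raw_sock, ca_certs=ca_bundle,
#                              server_hostname='example.com', ssl_context=ctx)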
|
mit
|
cypsun/FreeCAD
|
src/Mod/Sketcher/TestSketcherApp.py
|
27
|
7131
|
# (c) Juergen Riegel ([email protected]) 2011 LGPL *
# *
# This file is part of the FreeCAD CAx development system. *
# *
# This program is free software; you can redistribute it and/or modify *
# it under the terms of the GNU Lesser General Public License (LGPL) *
# as published by the Free Software Foundation; either version 2 of *
# the License, or (at your option) any later version. *
# for detail see the LICENCE text file. *
# *
# FreeCAD is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# GNU Library General Public License for more details. *
# *
# You should have received a copy of the GNU Library General Public *
# License along with FreeCAD; if not, write to the Free Software *
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# USA *
#**************************************************************************
import FreeCAD, os, sys, unittest, Part, Sketcher
App = FreeCAD
def CreateBoxSketchSet(SketchFeature):
SketchFeature.addGeometry(Part.Line(FreeCAD.Vector(-99.230339,36.960674,0),FreeCAD.Vector(69.432587,36.960674,0)))
SketchFeature.addGeometry(Part.Line(FreeCAD.Vector(69.432587,36.960674,0),FreeCAD.Vector(69.432587,-53.196629,0)))
SketchFeature.addGeometry(Part.Line(FreeCAD.Vector(69.432587,-53.196629,0),FreeCAD.Vector(-99.230339,-53.196629,0)))
SketchFeature.addGeometry(Part.Line(FreeCAD.Vector(-99.230339,-53.196629,0),FreeCAD.Vector(-99.230339,36.960674,0)))
# add the constraints
SketchFeature.addConstraint(Sketcher.Constraint('Coincident',0,2,1,1))
SketchFeature.addConstraint(Sketcher.Constraint('Coincident',1,2,2,1))
SketchFeature.addConstraint(Sketcher.Constraint('Coincident',2,2,3,1))
SketchFeature.addConstraint(Sketcher.Constraint('Coincident',3,2,0,1))
SketchFeature.addConstraint(Sketcher.Constraint('Horizontal',0))
SketchFeature.addConstraint(Sketcher.Constraint('Horizontal',2))
SketchFeature.addConstraint(Sketcher.Constraint('Vertical',1))
SketchFeature.addConstraint(Sketcher.Constraint('Vertical',3))
# add dimensions
SketchFeature.addConstraint(Sketcher.Constraint('Distance',1,81.370787))
SketchFeature.addConstraint(Sketcher.Constraint('Distance',0,187.573036))
def CreateSlotPlateSet(SketchFeature):
SketchFeature.addGeometry(Part.Line(App.Vector(60.029362,-30.279360,0),App.Vector(-120.376335,-30.279360,0)))
SketchFeature.addConstraint(Sketcher.Constraint('Horizontal',0))
SketchFeature.addGeometry(Part.Line(App.Vector(-120.376335,-30.279360,0),App.Vector(-70.193062,38.113884,0)))
SketchFeature.addConstraint(Sketcher.Constraint('Coincident',0,2,1,1))
SketchFeature.addGeometry(Part.Line(App.Vector(-70.193062,38.113884,0),App.Vector(60.241116,37.478645,0)))
SketchFeature.addConstraint(Sketcher.Constraint('Coincident',1,2,2,1))
SketchFeature.addConstraint(Sketcher.Constraint('Horizontal',2))
SketchFeature.addGeometry(Part.ArcOfCircle(Part.Circle(App.Vector(60.039921,3.811391,0),App.Vector(0,0,1),35.127132),-1.403763,1.419522))
SketchFeature.addConstraint(Sketcher.Constraint('Coincident',3,2,2,2))
SketchFeature.addConstraint(Sketcher.Constraint('Coincident',3,1,0,1))
SketchFeature.addConstraint(Sketcher.Constraint('Tangent',3,2))
SketchFeature.addConstraint(Sketcher.Constraint('Tangent',3,0))
SketchFeature.addConstraint(Sketcher.Constraint('Angle',0,2,1,1,0.947837))
SketchFeature.addConstraint(Sketcher.Constraint('Distance',0,184.127425))
SketchFeature.setDatum(9,200.000000)
SketchFeature.addConstraint(Sketcher.Constraint('Radius',3,38.424808))
SketchFeature.setDatum(10,40.000000)
SketchFeature.setDatum(8,0.872665)
SketchFeature.addConstraint(Sketcher.Constraint('DistanceX',0,2,0.0))
SketchFeature.setDatum(11,0.000000)
SketchFeature.movePoint(0,2,App.Vector(-0.007829,-33.376450,0))
SketchFeature.movePoint(0,2,App.Vector(-0.738149,-10.493386,0))
SketchFeature.movePoint(0,2,App.Vector(-0.007829,2.165328,0))
SketchFeature.addConstraint(Sketcher.Constraint('DistanceY',0,2,2.165328))
SketchFeature.setDatum(12,0.000000)
def CreateSlotPlateInnerSet(SketchFeature):
SketchFeature.addGeometry(Part.Circle(App.Vector(195.055893,39.562252,0),App.Vector(0,0,1),29.846098))
SketchFeature.addGeometry(Part.Line(App.Vector(150.319031,13.449363,0),App.Vector(36.700474,13.139774,0)))
SketchFeature.addConstraint(Sketcher.Constraint('Horizontal',5))
SketchFeature.addGeometry(Part.Line(App.Vector(36.700474,13.139774,0),App.Vector(77.566010,63.292927,0)))
SketchFeature.addConstraint(Sketcher.Constraint('Coincident',5,2,6,1))
SketchFeature.addGeometry(Part.Line(App.Vector(77.566010,63.292927,0),App.Vector(148.151917,63.602505,0)))
SketchFeature.addConstraint(Sketcher.Constraint('Coincident',6,2,7,1))
SketchFeature.addConstraint(Sketcher.Constraint('Horizontal',7))
SketchFeature.addConstraint(Sketcher.Constraint('Parallel',1,6))
SketchFeature.addGeometry(Part.ArcOfCircle(Part.Circle(App.Vector(192.422913,38.216347,0),App.Vector(0,0,1),45.315174),2.635158,3.602228))
SketchFeature.addConstraint(Sketcher.Constraint('Coincident',7,2,8,1))
SketchFeature.addConstraint(Sketcher.Constraint('Coincident',8,2,5,1))
#---------------------------------------------------------------------------
# define the test cases to test the FreeCAD Sketcher module
#---------------------------------------------------------------------------
class SketcherSolverTestCases(unittest.TestCase):
def setUp(self):
self.Doc = FreeCAD.newDocument("SketchSolverTest")
def testBoxCase(self):
self.Box = self.Doc.addObject('Sketcher::SketchObject','SketchBox')
CreateBoxSketchSet(self.Box)
self.Doc.recompute()
# moving a point of the sketch
self.Box.movePoint(0,2,App.Vector(88.342697,28.174158,0))
# fully constrain
self.Box.addConstraint(Sketcher.Constraint('DistanceX',1,2,90.0))
self.Box.addConstraint(Sketcher.Constraint('DistanceY',1,2,-50.0))
self.Doc.recompute()
def testSlotCase(self):
self.Slot = self.Doc.addObject('Sketcher::SketchObject','SketchSlot')
CreateSlotPlateSet(self.Slot)
self.Doc.recompute()
# test if all edges created
self.failUnless(len(self.Slot.Shape.Edges) == 4)
CreateSlotPlateInnerSet(self.Slot)
self.Doc.recompute()
self.failUnless(len(self.Slot.Shape.Edges) == 9)
def tearDown(self):
#closing doc
FreeCAD.closeDocument("SketchSolverTest")
#print ("omit close document for debuging")
|
lgpl-2.1
|
timthelion/FreeCAD_sf_master
|
src/Mod/Fem/convert2TetGen.py
|
27
|
10679
|
# (c) 2010 LGPL
#Make mesh of pn junction in TetGen format
import FreeCAD, FreeCADGui, Part, Mesh
App = FreeCAD # shortcut
Gui = FreeCADGui # shortcut
def exportMeshToTetGenPoly(meshToExport,filePath,beVerbose=1):
"""Export mesh to TetGen *.poly file format"""
## Part 1 - write node list to output file
if beVerbose == 1:
FreeCAD.Console.PrintMessage("\nExport of mesh to TetGen file ...")
(allVertices,allFacets) = meshToExport.Topology
f = open(filePath, 'w')
f.write("# This file was generated from FreeCAD geometry\n")
f.write("# Part 1 - node list\n")
f.write("%(TotalNumOfPoints)i %(NumOfDimensions)i %(NumOfProperties)i %(BoundaryMarkerExists)i\n" % \
{'TotalNumOfPoints':len(allVertices), \
'NumOfDimensions':3, \
'NumOfProperties':0, \
'BoundaryMarkerExists':0})
for PointIndex in range(len(allVertices)):
f.write("%(PointIndex)5i %(x) e %(y) e %(z) e\n" % \
{'PointIndex':PointIndex, \
'x':allVertices[PointIndex].x, \
'y':allVertices[PointIndex].y, \
'z':allVertices[PointIndex].z})
## Find out the BoundaryMarker for each facet. If an edge connects only two facets,
# then those facets should have the same BoundaryMarker
BoundaryMarkerExists = 1
PointList=[allFacets[0][1],allFacets[0][0]]
PointList.sort()
EdgeFacets = {(PointList[0],PointList[1]):set([0])}
Edge = []
# Find all facets for each edge
for FacetIndex in range(len(allFacets)):
Facet = allFacets[FacetIndex]
for i in range(0,-len(Facet),-1):
tmpEdge = [Facet[i],Facet[i+1]]
tmpEdge.sort()
Edge.append(tmpEdge)
for i in range(len(Edge)):
EdgeIndex = (Edge[i][0],Edge[i][1])
if EdgeIndex in EdgeFacets:
EdgeFacets[EdgeIndex].add(FacetIndex)
else:
EdgeFacets[EdgeIndex] = set([FacetIndex])
Edge = []
# Find BoundaryMarker for each facet
BoundaryMarker = []
for index in range(len(allFacets)):
BoundaryMarker.append(0)
MinMarker = -1
InitialFacet = 0
BoundaryMarker[InitialFacet] = MinMarker
EdgeKeys = EdgeFacets.keys()
disconnectedEdges = len(EdgeKeys)
if beVerbose == 1:
FreeCAD.Console.PrintMessage('\nBoundaryMarker:'+repr(BoundaryMarker)+' '+repr(len(EdgeFacets)))
searchForPair = 1
# Main loop: first search for all complementary facets, then fill one branch and repeat while edges are available
while len(EdgeFacets)>0:
removeEdge = 0
for EdgeIndex in EdgeKeys:
if len(EdgeFacets[EdgeIndex]) == 1:
removeEdge = 1
break
if len(EdgeFacets[EdgeIndex]) == 2:
FacetPair = []
for facet in EdgeFacets[EdgeIndex]:
FacetPair.append(facet)
if (BoundaryMarker[FacetPair[0]]==0) and (BoundaryMarker[FacetPair[1]]==0):
continue
if (BoundaryMarker[FacetPair[0]]!=0) and (BoundaryMarker[FacetPair[1]]!=0):
removeEdge = 1
break
if (BoundaryMarker[FacetPair[0]]!=0):
BoundaryMarker[FacetPair[1]] = BoundaryMarker[FacetPair[0]]
else:
BoundaryMarker[FacetPair[0]] = BoundaryMarker[FacetPair[1]]
removeEdge = 1
break
if searchForPair == 1:
continue
FacetTree = []
AllMarkers = 1
MarkerSum = 0
for facet in EdgeFacets[EdgeIndex]:
FacetTree.append(facet)
MarkerSum += BoundaryMarker[facet]
if MarkerSum == 0:
continue
for facet in EdgeFacets[EdgeIndex]:
if BoundaryMarker[facet] == 0:
MinMarker -= 1
BoundaryMarker[facet] = MinMarker
searchForPair = 1
removeEdge = 1
break
if removeEdge == 1:
del EdgeFacets[EdgeIndex]
EdgeKeys = EdgeFacets.keys()
continue
searchForPair = 0
# End of main loop
if beVerbose == 1:
FreeCAD.Console.PrintMessage('\nNew BoundaryMarker:'+repr(BoundaryMarker)+' '+repr(len(EdgeFacets)))
## Part 2 - write all facets to *.poly file
f.write("# Part 2 - facet list\n")
f.write("%(TotalNumOfFacets)i %(BoundaryMarkerExists)i\n" %\
{'TotalNumOfFacets':len(allFacets),\
'BoundaryMarkerExists':BoundaryMarkerExists})
for FacetIndex in range(len(allFacets)):
f.write("# FacetIndex = %(Index)i\n"%{'Index':FacetIndex})
f.write("%(NumOfPolygons)3i "%{'NumOfPolygons':1})
if BoundaryMarkerExists == 1:
f.write("0 %(BoundaryMarker)i"%{'BoundaryMarker':BoundaryMarker[FacetIndex]})
f.write("\n%(NumOfConers)3i "%{'NumOfConers':len(allFacets[FacetIndex])})
for PointIndex in range(len(allFacets[FacetIndex])):
# f.write(repr(allFacets[FacetIndex][PointIndex]))
f.write("%(PointIndex)i "%{'PointIndex':allFacets[FacetIndex][PointIndex]})
f.write("\n")
## Part 3 and Part 4 are zero
f.write("# Part 3 - the hole list.\n# There is no hole in bar.\n0\n")
f.write("# Part 4 - the region list.\n# There is no region defined.\n0\n")
f.write("# This file was generated from FreeCAD geometry\n")
f.close()
def export(objectslist,filename):
"""Called when freecad exports a mesh to poly format"""
for obj in objectslist:
if isinstance(obj,Mesh.Feature):
exportMeshToTetGenPoly(obj.Mesh,filename,False)
break
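# Usage sketch (assumes the active document already holds a Mesh::Feature named
# "pnMesh", e.g. one produced by createMesh() below):
#   obj = App.ActiveDocument.getObject("pnMesh")
#   exportMeshToTetGenPoly(obj.Mesh, "/tmp/pnJunction.poly", beVerbose=1)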
def createMesh():
## ======================== Script beginning... ========================
beVerbose = 1
if beVerbose == 1:
FreeCAD.Console.PrintMessage("\n\n\n\n\n\n\n\nScript starts...")
## Geometry definition
# Define objects names
PyDocumentName = "pnJunction"
PSideBoxName = "PSide"
NSideBoxName = "NSide"
DepletionBoxName = "Depletion"
SurfDepletionBoxName = "SurfDepletion"
OxideBoxName = "Oxide"
AdsorbtionBoxName = "Adsorbtion"
pnMeshName = "pnMesh"
# Init objects
if beVerbose == 1:
FreeCAD.Console.PrintMessage("\nInit Objects...")
# App.closeDocument(App.ActiveDocument.Label) #closeDocument after restart of macro. Needs any ActiveDocument.
AppPyDoc = App.newDocument(PyDocumentName)
NSideBox = AppPyDoc.addObject("Part::Box",NSideBoxName)
PSideBox = AppPyDoc.addObject("Part::Box",PSideBoxName)
DepletionBox = AppPyDoc.addObject("Part::Box",DepletionBoxName)
SurfDepletionBox = AppPyDoc.addObject("Part::Box",SurfDepletionBoxName)
OxideBox = AppPyDoc.addObject("Part::Box",OxideBoxName)
AdsorbtionBox = AppPyDoc.addObject("Part::Box",AdsorbtionBoxName)
pnMesh = AppPyDoc.addObject("Mesh::Feature",pnMeshName)
BoxList=[NSideBox, DepletionBox, PSideBox, OxideBox, AdsorbtionBox, SurfDepletionBox]
NSideBoxMesh = Mesh.Mesh()
PSideBoxMesh = Mesh.Mesh()
DepletionBoxMesh = Mesh.Mesh()
SurfDepletionBoxMesh = Mesh.Mesh()
OxideBoxMesh = Mesh.Mesh()
AdsorbtionBoxMesh = Mesh.Mesh()
BoxMeshList = [NSideBoxMesh, DepletionBoxMesh, PSideBoxMesh, OxideBoxMesh, AdsorbtionBoxMesh, SurfDepletionBoxMesh]
if beVerbose == 1:
if len(BoxList)!=len(BoxMeshList):
FreeCAD.Console.PrintMessage("\n ERROR! Input len() of BoxList and BoxMeshList is not the same! ")
## Set sizes in nanometers
if beVerbose == 1:
FreeCAD.Console.PrintMessage("\nSet sizes...")
tessellationTollerance = 0.05
ModelWidth = 300
BulkHeight = 300
BulkLength = 300
DepletionSize = 50
OxideThickness = 5
AdsorbtionThickness = 10
# Big volumes of n and p material
NSideBox.Height = BulkHeight #Z-direction
NSideBox.Width = ModelWidth #Y-direction = const
NSideBox.Length = BulkLength #X-direction
PSideBox.Height = BulkHeight
PSideBox.Width = ModelWidth
PSideBox.Length = BulkLength
# Thin depletion layer between
DepletionBox.Height = BulkHeight
DepletionBox.Width = ModelWidth
DepletionBox.Length = DepletionSize*2
# Surface depletion layer
SurfDepletionBox.Height = DepletionSize
SurfDepletionBox.Width = ModelWidth
SurfDepletionBox.Length = BulkLength*2 + DepletionSize*2
# Oxide on the top
OxideBox.Height = OxideThickness
OxideBox.Width = ModelWidth
OxideBox.Length = BulkLength*2 + DepletionSize*2
# Adsorbtion layer
AdsorbtionBox.Height = AdsorbtionThickness
AdsorbtionBox.Width = ModelWidth
AdsorbtionBox.Length = BulkLength*2 + DepletionSize*2
# Object placement
Rot = App.Rotation(0,0,0,1)
NSideBox.Placement = App.Placement(App.Vector(0,0,-BulkHeight),Rot)
PSideBox.Placement = App.Placement(App.Vector(DepletionSize*2+BulkLength,0,-BulkHeight),Rot)
DepletionBox.Placement = App.Placement(App.Vector(BulkLength,0,-BulkHeight),Rot)
SurfDepletionBox.Placement = App.Placement(App.Vector(0,0,0),Rot)
OxideBox.Placement = App.Placement(App.Vector(0,0,DepletionSize),Rot)
AdsorbtionBox.Placement = App.Placement(App.Vector(0,0,DepletionSize+OxideThickness),Rot)
## Unite
if beVerbose == 1:
FreeCAD.Console.PrintMessage("\nFuse objects...")
fuseShape = BoxList[0].Shape
for index in range(1,len(BoxList),1):
fuseShape = fuseShape.fuse(BoxList[index].Shape)
nmesh = Mesh.Mesh()
nmesh.addFacets(fuseShape.tessellate(tessellationTollerance))
# for index in range(len(BoxList)):
for index in range(len(BoxList)-1): # Manual hack
BoxMeshList[index].addFacets(BoxList[index].Shape.tessellate(tessellationTollerance))
nmesh.addMesh(BoxMeshList[index])
nmesh.removeDuplicatedPoints()
nmesh.removeDuplicatedFacets()
pnMesh.Mesh = nmesh
# Hide all boxes
for box in BoxList:
Gui.hideObject(box)
# # Remove all boxes
# for box in BoxList:
# App.ActiveDocument.removeObject(box.Name)
# Update document
AppPyDoc.recompute()
## export to TetGen *.poly (use File|Export instead)
#filePath = "/home/tig/tmp/tetgen/pnJunction.poly"
#exportMeshToTetGenPoly(pnMesh.Mesh,filePath,beVerbose)
Gui.activeDocument().activeView().viewAxometric()
Gui.SendMsgToActiveView("ViewFit")
if beVerbose == 1:
FreeCAD.Console.PrintMessage("\nScript finished without errors.")
|
lgpl-2.1
|
ArcherSys/ArcherSys
|
Lib/site-packages/tornado/test/httpclient_test.py
|
12
|
26340
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import copy
import functools
import sys
import threading
import datetime
from io import BytesIO
from tornado.escape import utf8
from tornado import gen
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.util import u
from tornado.web import Application, RequestHandler, url
from tornado.httputil import format_timestamp, HTTPHeaders
class HelloWorldHandler(RequestHandler):
def get(self):
name = self.get_argument("name", "world")
self.set_header("Content-Type", "text/plain")
self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
def post(self):
self.finish("Post arg1: %s, arg2: %s" % (
self.get_argument("arg1"), self.get_argument("arg2")))
class PutHandler(RequestHandler):
def put(self):
self.write("Put body: ")
self.write(self.request.body)
class RedirectHandler(RequestHandler):
def prepare(self):
self.write('redirects can have bodies too')
self.redirect(self.get_argument("url"),
status=int(self.get_argument("status", "302")))
class ChunkHandler(RequestHandler):
@gen.coroutine
def get(self):
self.write("asdf")
self.flush()
# Wait a bit to ensure the chunks are sent and received separately.
yield gen.sleep(0.01)
self.write("qwer")
class AuthHandler(RequestHandler):
def get(self):
self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
def get(self, count):
count = int(count)
if count > 0:
self.redirect(self.reverse_url("countdown", count - 1))
else:
self.write("Zero")
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
class UserAgentHandler(RequestHandler):
def get(self):
self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
def get(self):
self.set_status(304)
self.set_header('Content-Length', 42)
def _clear_headers_for_304(self):
# Tornado strips content-length from 304 responses, but here we
# want to simulate servers that include the headers anyway.
pass
class PatchHandler(RequestHandler):
def patch(self):
"Return the request payload - so we can check it is being kept"
self.write(self.request.body)
class AllMethodsHandler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
def method(self):
self.write(self.request.method)
get = post = put = delete = options = patch = other = method
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
url("/hello", HelloWorldHandler),
url("/post", PostHandler),
url("/put", PutHandler),
url("/redirect", RedirectHandler),
url("/chunk", ChunkHandler),
url("/auth", AuthHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/echopost", EchoPostHandler),
url("/user_agent", UserAgentHandler),
url("/304_with_content_length", ContentLength304Handler),
url("/all_methods", AllMethodsHandler),
url('/patch', PatchHandler),
], gzip=True)
def test_patch_receives_payload(self):
body = b"some patch data"
response = self.fetch("/patch", method='PATCH', body=body)
self.assertEqual(response.code, 200)
self.assertEqual(response.body, body)
@skipOnTravis
def test_hello_world(self):
response = self.fetch("/hello")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["Content-Type"], "text/plain")
self.assertEqual(response.body, b"Hello world!")
self.assertEqual(int(response.request_time), 0)
response = self.fetch("/hello?name=Ben")
self.assertEqual(response.body, b"Hello Ben!")
def test_streaming_callback(self):
# streaming_callback is also tested in test_chunked
chunks = []
response = self.fetch("/hello",
streaming_callback=chunks.append)
# with streaming_callback, data goes to the callback and not response.body
self.assertEqual(chunks, [b"Hello world!"])
self.assertFalse(response.body)
def test_post(self):
response = self.fetch("/post", method="POST",
body="arg1=foo&arg2=bar")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_chunked(self):
response = self.fetch("/chunk")
self.assertEqual(response.body, b"asdfqwer")
chunks = []
response = self.fetch("/chunk",
streaming_callback=chunks.append)
self.assertEqual(chunks, [b"asdf", b"qwer"])
self.assertFalse(response.body)
def test_chunked_close(self):
# test case in which chunks spread read-callback processing
# over several ioloop iterations, but the connection is already closed.
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked
1
1
1
2
0
""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
# fake an HTTP server using chunked encoding where the final chunks
# and connection close all happen at once
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.body, b"12")
self.io_loop.remove_handler(sock.fileno())
def test_streaming_stack_context(self):
chunks = []
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def streaming_cb(chunk):
chunks.append(chunk)
if chunk == b'qwer':
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', streaming_callback=streaming_cb)
self.assertEqual(chunks, [b'asdf', b'qwer'])
self.assertEqual(1, len(exc_info))
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_basic_auth(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_basic_auth_explicit_mode(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="basic").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
# on an unknown mode.
with ExpectLog(gen_log, "uncaught exception", required=False):
with self.assertRaises((ValueError, HTTPError)):
response = self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="asdf")
response.rethrow()
def test_follow_redirect(self):
response = self.fetch("/countdown/2", follow_redirects=False)
self.assertEqual(302, response.code)
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
response = self.fetch("/countdown/2")
self.assertEqual(200, response.code)
self.assertTrue(response.effective_url.endswith("/countdown/0"))
self.assertEqual(b"Zero", response.body)
def test_credentials_in_url(self):
url = self.get_url("/auth").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
response.body)
def test_body_encoding(self):
unicode_body = u("\xe9")
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
response = self.fetch("/echopost", method="POST", body=unicode_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "2")
self.assertEqual(response.body, utf8(unicode_body))
# byte strings pass through directly
response = self.fetch("/echopost", method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# Mixing unicode in headers and byte string bodies shouldn't
# break anything
response = self.fetch("/echopost", method="POST", body=byte_body,
headers={"Content-Type": "application/blah"},
user_agent=u("foo"))
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
def test_types(self):
response = self.fetch("/hello")
self.assertEqual(type(response.body), bytes)
self.assertEqual(type(response.headers["Content-Type"]), str)
self.assertEqual(type(response.code), int)
self.assertEqual(type(response.effective_url), str)
def test_header_callback(self):
first_line = []
headers = {}
chunks = []
def header_callback(header_line):
if header_line.startswith('HTTP/1.1 101'):
# Upgrading to HTTP/2
pass
elif header_line.startswith('HTTP/'):
first_line.append(header_line)
elif header_line != '\r\n':
k, v = header_line.split(':', 1)
headers[k.lower()] = v.strip()
def streaming_callback(chunk):
# All header callbacks are run before any streaming callbacks,
# so the header data is available to process the data as it
# comes in.
self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
chunks.append(chunk)
self.fetch('/chunk', header_callback=header_callback,
streaming_callback=streaming_callback)
self.assertEqual(len(first_line), 1, first_line)
self.assertRegexpMatches(first_line[0], 'HTTP/[0-9]\\.[0-9] 200.*\r\n')
self.assertEqual(chunks, [b'asdf', b'qwer'])
def test_header_callback_stack_context(self):
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def header_callback(header_line):
if header_line.lower().startswith('content-type:'):
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', header_callback=header_callback)
self.assertEqual(len(exc_info), 1)
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_configure_defaults(self):
defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
# Construct a new instance of the configured client class
client = self.http_client.__class__(self.io_loop, force_instance=True,
defaults=defaults)
try:
client.fetch(self.get_url('/user_agent'), callback=self.stop)
response = self.wait()
self.assertEqual(response.body, b'TestDefaultUserAgent')
finally:
client.close()
def test_header_types(self):
# Header values may be passed as character or utf8 byte strings,
# in a plain dictionary or an HTTPHeaders object.
# Keys must always be the native str type.
# All combinations should have the same results on the wire.
for value in [u("MyUserAgent"), b"MyUserAgent"]:
for container in [dict, HTTPHeaders]:
headers = container()
headers['User-Agent'] = value
resp = self.fetch('/user_agent', headers=headers)
self.assertEqual(
resp.body, b"MyUserAgent",
"response=%r, value=%r, container=%r" %
(resp.body, value, container))
def test_multi_line_headers(self):
# Multi-line http headers are rare but rfc-allowed
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
stream.write(b"""\
HTTP/1.1 200 OK
X-XSS-Protection: 1;
\tmode=block
""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.headers['X-XSS-Protection'], "1; mode=block")
self.io_loop.remove_handler(sock.fileno())
def test_304_with_content_length(self):
# According to the spec 304 responses SHOULD NOT include
# Content-Length or other entity headers, but some servers do it
# anyway.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
response = self.fetch('/304_with_content_length')
self.assertEqual(response.code, 304)
self.assertEqual(response.headers['Content-Length'], '42')
def test_final_callback_stack_context(self):
# The final callback should be run outside of the httpclient's
# stack_context. We want to ensure that there is not stack_context
# between the user's callback and the IOLoop, so monkey-patch
# IOLoop.handle_callback_exception and disable the test harness's
# context with a NullContext.
# Note that this does not apply to secondary callbacks (header
# and streaming_callback), as errors there must be seen as errors
# by the http client so it can clean up the connection.
exc_info = []
def handle_callback_exception(callback):
exc_info.append(sys.exc_info())
self.stop()
self.io_loop.handle_callback_exception = handle_callback_exception
with NullContext():
self.http_client.fetch(self.get_url('/hello'),
lambda response: 1 / 0)
self.wait()
self.assertEqual(exc_info[0][0], ZeroDivisionError)
@gen_test
def test_future_interface(self):
response = yield self.http_client.fetch(self.get_url('/hello'))
self.assertEqual(response.body, b'Hello world!')
@gen_test
def test_future_http_error(self):
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(context.exception.code, 404)
self.assertEqual(context.exception.response.code, 404)
@gen_test
def test_future_http_error_no_raise(self):
response = yield self.http_client.fetch(self.get_url('/notfound'), raise_error=False)
self.assertEqual(response.code, 404)
@gen_test
def test_reuse_request_from_response(self):
# The response.request attribute should be an HTTPRequest, not
# a _RequestProxy.
# This test uses self.http_client.fetch because self.fetch calls
# self.get_url on the input unconditionally.
url = self.get_url('/hello')
response = yield self.http_client.fetch(url)
self.assertEqual(response.request.url, url)
self.assertTrue(isinstance(response.request, HTTPRequest))
response2 = yield self.http_client.fetch(response.request)
self.assertEqual(response2.body, b'Hello world!')
def test_all_methods(self):
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/all_methods', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT', 'PATCH']:
response = self.fetch('/all_methods', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
response = self.fetch('/all_methods', method='HEAD')
self.assertEqual(response.body, b'')
response = self.fetch('/all_methods', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'OTHER')
def test_body_sanity_checks(self):
# These methods require a body.
for method in ('POST', 'PUT', 'PATCH'):
with self.assertRaises(ValueError) as context:
resp = self.fetch('/all_methods', method=method)
resp.rethrow()
self.assertIn('must not be None', str(context.exception))
resp = self.fetch('/all_methods', method=method,
allow_nonstandard_methods=True)
self.assertEqual(resp.code, 200)
# These methods don't allow a body.
for method in ('GET', 'DELETE', 'OPTIONS'):
with self.assertRaises(ValueError) as context:
resp = self.fetch('/all_methods', method=method, body=b'asdf')
resp.rethrow()
self.assertIn('must be None', str(context.exception))
# In most cases this can be overridden, but curl_httpclient
# does not allow body with a GET at all.
if method != 'GET':
resp = self.fetch('/all_methods', method=method, body=b'asdf',
allow_nonstandard_methods=True)
resp.rethrow()
self.assertEqual(resp.code, 200)
# This test causes odd failures with the combination of
# curl_httpclient (at least with the version of libcurl available
# on ubuntu 12.04), TwistedIOLoop, and epoll. For POST (but not PUT),
# curl decides the response came back too soon and closes the connection
# to start again. It does this *before* telling the socket callback to
# unregister the FD. Some IOLoop implementations have special kernel
# integration to discover this immediately. Tornado's IOLoops
# ignore errors on remove_handler to accommodate this behavior, but
# Twisted's reactor does not. The removeReader call fails and so
# do all future removeAll calls (which our tests do at cleanup).
#
# def test_post_307(self):
# response = self.fetch("/redirect?status=307&url=/post",
# method="POST", body=b"arg1=foo&arg2=bar")
# self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_put_307(self):
response = self.fetch("/redirect?status=307&url=/put",
method="PUT", body=b"hello")
response.rethrow()
self.assertEqual(response.body, b"Put body: hello")
class RequestProxyTest(unittest.TestCase):
def test_request_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
user_agent='foo'),
dict())
self.assertEqual(proxy.user_agent, 'foo')
def test_default_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict(network_interface='foo'))
self.assertEqual(proxy.network_interface, 'foo')
def test_both_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
proxy_host='foo'),
dict(proxy_host='bar'))
self.assertEqual(proxy.proxy_host, 'foo')
def test_neither_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
self.assertIs(proxy.auth_username, None)
def test_bad_attribute(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
with self.assertRaises(AttributeError):
proxy.foo
def test_defaults_none(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
def test_str(self):
response = HTTPResponse(HTTPRequest('http://example.com'),
200, headers={}, buffer=BytesIO())
s = str(response)
self.assertTrue(s.startswith('HTTPResponse('))
self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
def setUp(self):
if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
'AsyncIOMainLoop'):
# TwistedIOLoop only supports the global reactor, so we can't have
# separate IOLoops for client and server threads.
# AsyncIOMainLoop doesn't work with the default policy
# (although it could with some tweaks to this test and a
# policy that created loops for non-main threads).
raise unittest.SkipTest(
'Sync HTTPClient not compatible with TwistedIOLoop or '
'AsyncIOMainLoop')
self.server_ioloop = IOLoop()
sock, self.port = bind_unused_port()
app = Application([('/', HelloWorldHandler)])
self.server = HTTPServer(app, io_loop=self.server_ioloop)
self.server.add_socket(sock)
self.server_thread = threading.Thread(target=self.server_ioloop.start)
self.server_thread.start()
self.http_client = HTTPClient()
def tearDown(self):
def stop_server():
self.server.stop()
# Delay the shutdown of the IOLoop by one iteration because
# the server may still have some cleanup work left when
# the client finishes with the response (this is noticeable
# with http/2, which leaves a Future with an unexamined
# StreamClosedError on the loop).
self.server_ioloop.add_callback(self.server_ioloop.stop)
self.server_ioloop.add_callback(stop_server)
self.server_thread.join()
self.http_client.close()
self.server_ioloop.close(all_fds=True)
def get_url(self, path):
return 'http://127.0.0.1:%d%s' % (self.port, path)
def test_sync_client(self):
response = self.http_client.fetch(self.get_url('/'))
self.assertEqual(b'Hello world!', response.body)
def test_sync_client_error(self):
# Synchronous HTTPClient raises errors directly; no need for
# response.rethrow()
with self.assertRaises(HTTPError) as assertion:
self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
def test_headers(self):
request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
self.assertEqual(request.headers, {'foo': 'bar'})
def test_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = {'bar': 'baz'}
self.assertEqual(request.headers, {'bar': 'baz'})
def test_null_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = None
self.assertEqual(request.headers, {})
def test_body(self):
request = HTTPRequest('http://example.com', body='foo')
self.assertEqual(request.body, utf8('foo'))
def test_body_setter(self):
request = HTTPRequest('http://example.com')
request.body = 'foo'
self.assertEqual(request.body, utf8('foo'))
def test_if_modified_since(self):
http_date = datetime.datetime.utcnow()
request = HTTPRequest('http://example.com', if_modified_since=http_date)
self.assertEqual(request.headers,
{'If-Modified-Since': format_timestamp(http_date)})
class HTTPErrorTestCase(unittest.TestCase):
def test_copy(self):
e = HTTPError(403)
e2 = copy.copy(e)
self.assertIsNot(e, e2)
self.assertEqual(e.code, e2.code)
def test_str(self):
e = HTTPError(403)
self.assertEqual(str(e), "HTTP 403: Forbidden")
|
mit
|
gedads/Neodynamis
|
migrations/migrate.py
|
33
|
1735
|
import MySQLdb
import re
import spell_blobs_to_spell_table
credentials = {}
db = None
cur = None
def connect():
print("Loading conf/map_darkstar.conf")
# Grab mysql credentials
filename = "../conf/map_darkstar.conf"
global credentials
global db
global cur
with open(filename) as f:
while True:
line = f.readline()
if not line: break
match = re.match(r"(mysql_\w+):\s+(\S+)", line)
if match:
credentials[match.group(1)] = match.group(2)
database = credentials["mysql_database"]
host = credentials["mysql_host"]
port = int(credentials["mysql_port"])
login = credentials["mysql_login"]
password = credentials["mysql_password"]
print(database, host, port, login, password)
db = MySQLdb.connect(host=host,
user=login,
passwd=password,
db=database,
port=port)
cur = db.cursor()
print("Connected to database " + database)
def close():
print("Closing connection...")
cur.close()
db.close()
def run_all_migrations():
connect()
run_migration(spell_blobs_to_spell_table)
print("Finished running all migrations")
close()
def run_migration(migration):
# Ensure things like new table exists
migration.check_preconditions(cur)
# Don't run migration twice
if not migration.needs_to_run(cur):
print("Already ran " + migration.migration_name() + " skipping...")
return
print("Running migrations for " + migration.migration_name())
migration.migrate(cur, db)
print("[Success] Done running " + migration.migration_name())
if __name__ == "__main__":
run_all_migrations()
|
gpl-3.0
|
owlzhou/ttornado
|
env/Lib/site-packages/pip/_vendor/requests/packages/urllib3/fields.py
|
1007
|
5833
|
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
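# Illustrative outputs of the two branches above:
#   format_header_param('filename', 'report.txt')
#       -> 'filename="report.txt"'
#   format_header_param('filename', u'r\xe9sum\xe9.txt')    # non-ASCII value
#       -> "filename*=utf-8''r%C3%A9sum%C3%A9.txt"          # RFC 2231 form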
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
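# Usage sketch mirroring the docstring's tuple forms (field names are examples):
#   plain = RequestField.from_tuples('foo', 'bar')
#   typed = RequestField.from_tuples('typedfile',
#                                    ('bazfile.bin', b'...', 'image/jpeg'))
#   # both come back with multipart headers already set via make_multipart()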
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method sets the "Content-Disposition", "Content-Type" and
"Content-Location" headers on the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
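# End-to-end sketch: build a file part and render its MIME headers.
#   field = RequestField('image', b'<png bytes>', filename='cat.png')
#   field.make_multipart(content_type='image/png')
#   field.render_headers()
#   # -> 'Content-Disposition: form-data; name="image"; filename="cat.png"\r\n'
#   #    'Content-Type: image/png\r\n\r\n'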
|
apache-2.0
|
thaddeusdiamond/Social-Hierarchical-Learning
|
ext/protobuf/python/mox.py
|
603
|
38237
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occurred. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import re
import types
import unittest
import stubout
class Error(AssertionError):
"""Base exception for this module."""
pass
class ExpectedMethodCallsError(Error):
"""Raised when Verify() is called before all expected methods have been called
"""
def __init__(self, expected_methods):
"""Init exception.
Args:
# expected_methods: A sequence of MockMethod objects that should have been
# called.
expected_methods: [MockMethod]
Raises:
ValueError: if expected_methods contains no methods.
"""
if not expected_methods:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_methods = expected_methods
def __str__(self):
calls = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_methods)])
return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
"""Raised when an unexpected method is called.
This can occur if a method is called with incorrect parameters, or out of the
specified order.
"""
def __init__(self, unexpected_method, expected):
"""Init exception.
Args:
# unexpected_method: MockMethod that was called but was not at the head of
# the expected_method queue.
# expected: MockMethod or UnorderedGroup the method should have
# been in.
unexpected_method: MockMethod
expected: MockMethod or UnorderedGroup
"""
Error.__init__(self)
self._unexpected_method = unexpected_method
self._expected = expected
def __str__(self):
return "Unexpected method call: %s. Expecting: %s" % \
(self._unexpected_method, self._expected)
class UnknownMethodCallError(Error):
"""Raised if an unknown method is requested of the mock object."""
def __init__(self, unknown_method_name):
"""Init exception.
Args:
# unknown_method_name: Method call that is not part of the mocked class's
# public interface.
unknown_method_name: str
"""
Error.__init__(self)
self._unknown_method_name = unknown_method_name
def __str__(self):
return "Method called is not a member of the object: %s" % \
self._unknown_method_name
class Mox(object):
"""Mox: a factory for creating mock objects."""
# A list of types that should be stubbed out with MockObjects (as
# opposed to MockAnythings).
_USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType,
types.ObjectType, types.TypeType]
def __init__(self):
"""Initialize a new Mox."""
self._mock_objects = []
self.stubs = stubout.StubOutForTesting()
def CreateMock(self, class_to_mock):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
"""
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock
def CreateMockAnything(self):
"""Create a mock that will accept any method calls.
This does not enforce an interface.
"""
new_mock = MockAnything()
self._mock_objects.append(new_mock)
return new_mock
def ReplayAll(self):
"""Set all mock objects to replay mode."""
for mock_obj in self._mock_objects:
mock_obj._Replay()
def VerifyAll(self):
"""Call verify on all mock objects created."""
for mock_obj in self._mock_objects:
mock_obj._Verify()
def ResetAll(self):
"""Call reset on all mock objects. This does not unset stubs."""
for mock_obj in self._mock_objects:
mock_obj._Reset()
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub)
def UnsetStubs(self):
"""Restore stubs to their original state."""
self.stubs.UnsetAll()
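# Illustrative record/replay sketch for StubOutWithMock (AndReturn comes from
# the workflow shown in the module docstring; os.path is just an example target):
#   m = Mox()
#   m.StubOutWithMock(os.path, 'exists')
#   os.path.exists('/tmp').AndReturn(True)   # record
#   m.ReplayAll()
#   os.path.exists('/tmp')                   # replay -> True
#   m.VerifyAll()
#   m.UnsetStubs()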
def Replay(*args):
"""Put mocks into Replay mode.
Args:
# args is any number of mocks to put into replay mode.
"""
for mock in args:
mock._Replay()
def Verify(*args):
"""Verify mocks.
Args:
# args is any number of mocks to be verified.
"""
for mock in args:
mock._Verify()
def Reset(*args):
"""Reset mocks.
Args:
# args is any number of mocks to be reset.
"""
for mock in args:
mock._Reset()
class MockAnything:
"""A mock that can be used to mock anything.
This is helpful for mocking classes that do not provide a public interface.
"""
def __init__(self):
""" """
self._Reset()
def __getattr__(self, method_name):
"""Intercept method calls on this object.
A new MockMethod is returned that is aware of the MockAnything's
state (record or replay). The call will be recorded or replayed
by the MockMethod's __call__.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return self._CreateMockMethod(method_name)
def _CreateMockMethod(self, method_name):
"""Create a new mock method call and return it.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return MockMethod(method_name, self._expected_calls_queue,
self._replay_mode)
def __nonzero__(self):
"""Return 1 for nonzero so the mock can be used as a conditional."""
return 1
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __ne__(self, rhs):
"""Provide custom logic to compare objects."""
return not self == rhs
def _Replay(self):
"""Start replaying expected method calls."""
self._replay_mode = True
def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue)
def _Reset(self):
"""Reset the state of this mock to record mode with an empty queue."""
# Maintain a list of method calls we are expecting
self._expected_calls_queue = deque()
# Make sure we are in setup mode, not replay mode
self._replay_mode = False
class MockObject(MockAnything, object):
"""A mock object that simulates the public/protected interface of a class."""
def __init__(self, class_to_mock):
"""Initialize a mock object.
This determines the methods and properties of the class and stores them.
Args:
# class_to_mock: class to be mocked
class_to_mock: class
"""
# This is used to hack around the mixin/inheritance of MockAnything, which
# is not a proper object (it can be anything. :-)
MockAnything.__dict__['__init__'](self)
# Get a list of all the public and special methods we should mock.
self._known_methods = set()
self._known_vars = set()
self._class_to_mock = class_to_mock
for method in dir(class_to_mock):
if callable(getattr(class_to_mock, method)):
self._known_methods.add(method)
else:
self._known_vars.add(method)
def __getattr__(self, name):
"""Intercept attribute request on this object.
If the attribute is a public class variable, it will be returned and not
recorded as a call.
If the attribute is not a variable, it is handled like a method
call. The method name is checked against the set of mockable
methods, and a new MockMethod is returned that is aware of the
MockObject's state (record or replay). The call will be recorded
or replayed by the MockMethod's __call__.
Args:
# name: the name of the attribute being requested.
name: str
Returns:
Either a class variable or a new MockMethod that is aware of the state
of the mock (record or replay).
Raises:
UnknownMethodCallError if the MockObject does not mock the requested
method.
"""
if name in self._known_vars:
return getattr(self._class_to_mock, name)
if name in self._known_methods:
return self._CreateMockMethod(name)
raise UnknownMethodCallError(name)
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockObject) and
self._class_to_mock == rhs._class_to_mock and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __setitem__(self, key, value):
"""Provide custom logic for mocking classes that support item assignment.
Args:
key: Key to set the value for.
value: Value to set.
Returns:
Expected return value in replay mode. A MockMethod object for the
__setitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not support item assignment.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
setitem = self._class_to_mock.__dict__.get('__setitem__', None)
# Verify the class supports item assignment.
if setitem is None:
raise TypeError('object does not support item assignment')
# If we are in replay mode then simply call the mock __setitem__ method.
if self._replay_mode:
return MockMethod('__setitem__', self._expected_calls_queue,
self._replay_mode)(key, value)
# Otherwise, create a mock method __setitem__.
return self._CreateMockMethod('__setitem__')(key, value)
def __getitem__(self, key):
"""Provide custom logic for mocking classes that are subscriptable.
Args:
key: Key to return the value for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__getitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not subscriptable.
UnexpectedMethodCallError if the object does not expect the call to
__getitem__.
"""
getitem = self._class_to_mock.__dict__.get('__getitem__', None)
# Verify the class is subscriptable.
if getitem is None:
raise TypeError('unsubscriptable object')
# If we are in replay mode then simply call the mock __getitem__ method.
if self._replay_mode:
return MockMethod('__getitem__', self._expected_calls_queue,
self._replay_mode)(key)
# Otherwise, create a mock method __getitem__.
return self._CreateMockMethod('__getitem__')(key)
def __call__(self, *params, **named_params):
"""Provide custom logic for mocking classes that are callable."""
# Verify the class we are mocking is callable
callable = self._class_to_mock.__dict__.get('__call__', None)
if callable is None:
raise TypeError('Not callable')
# Because the call is happening directly on this object instead of a method,
# the call on the mock method is made right here
mock_method = self._CreateMockMethod('__call__')
return mock_method(*params, **named_params)
@property
def __class__(self):
"""Return the class that is being mocked."""
return self._class_to_mock
class MockMethod(object):
"""Callable mock method.
A MockMethod should act exactly like the method it mocks, accepting parameters
and returning a value, or throwing an exception (as specified). When this
method is called, it can optionally verify whether the called method (name and
signature) matches the expected method.
"""
def __init__(self, method_name, call_queue, replay_mode):
"""Construct a new mock method.
Args:
# method_name: the name of the method
# call_queue: deque of calls, verify this call against the head, or add
# this call to the queue.
# replay_mode: False if we are recording, True if we are verifying calls
# against the call queue.
method_name: str
call_queue: list or deque
replay_mode: bool
"""
self._name = method_name
self._call_queue = call_queue
if not isinstance(call_queue, deque):
self._call_queue = deque(self._call_queue)
self._replay_mode = replay_mode
self._params = None
self._named_params = None
self._return_value = None
self._exception = None
self._side_effects = None
def __call__(self, *params, **named_params):
"""Log parameters and return the specified return value.
If the Mock(Anything/Object) associated with this call is in record mode,
this MockMethod will be pushed onto the expected call queue. If the mock
is in replay mode, this will pop a MockMethod off the top of the queue and
verify this call is equal to the expected call.
Raises:
UnexpectedMethodCall if this call is supposed to match an expected method
call and it does not.
"""
self._params = params
self._named_params = named_params
if not self._replay_mode:
self._call_queue.append(self)
return self
expected_method = self._VerifyMethodCall()
if expected_method._side_effects:
expected_method._side_effects(*params, **named_params)
if expected_method._exception:
raise expected_method._exception
return expected_method._return_value
def __getattr__(self, name):
"""Raise an AttributeError with a helpful message."""
raise AttributeError('MockMethod has no attribute "%s". '
'Did you remember to put your mocks in replay mode?' % name)
def _PopNextMethod(self):
"""Pop the next method from our call queue."""
try:
return self._call_queue.popleft()
except IndexError:
raise UnexpectedMethodCallError(self, None)
def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected
def __str__(self):
params = ', '.join(
[repr(p) for p in self._params or []] +
['%s=%r' % x for x in sorted((self._named_params or {}).items())])
desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
return desc
def __eq__(self, rhs):
"""Test whether this MockMethod is equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return (isinstance(rhs, MockMethod) and
self._name == rhs._name and
self._params == rhs._params and
self._named_params == rhs._named_params)
def __ne__(self, rhs):
"""Test whether this MockMethod is not equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return not self == rhs
def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are on the stack.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create an instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self
def InAnyOrder(self, group_name="default"):
"""Move this method into a group of unordered calls.
A group of unordered calls must be defined together, and must be executed
in full before the next expected method can be called. There can be
multiple groups that are expected serially, if they are given
different group names. The same group name can be reused if there is a
standard method call, or a group with a different name, spliced between
usages.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
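# Illustrative only (mock_dao and Process are assumed names, not defined in this
# module): recording
#   mock_dao.Process('a').InAnyOrder()
#   mock_dao.Process('b').InAnyOrder()
# places both calls in one UnorderedGroup, so during replay Process('b') followed
# by Process('a') verifies just as well as the recorded order.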
def MultipleTimes(self, group_name="default"):
"""Move this method into group of calls which may be called multiple times.
A group of repeating calls must be defined together, and must be executed in
full before the next expected method can be called.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
def AndReturn(self, return_value):
"""Set the value to return when this method is called.
Args:
# return_value can be anything.
"""
self._return_value = return_value
return return_value
def AndRaise(self, exception):
"""Set the exception to raise when this method is called.
Args:
# exception: the exception to raise when this method is called.
exception: Exception
"""
self._exception = exception
def WithSideEffects(self, side_effects):
"""Set the side effects that are simulated when this method is called.
Args:
side_effects: A callable which modifies the parameters or other relevant
state which a given test case depends on.
Returns:
Self for chaining with AndReturn and AndRaise.
"""
self._side_effects = side_effects
return self
class Comparator:
"""Base class for all Mox comparators.
A Comparator can be used as a parameter to a mocked method when the exact
value is not known. For example, the code you are testing might build up a
long SQL string that is passed to your mock DAO. You're only interested that
the IN clause contains the proper primary keys, so you can set your mock
up as follows:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
A Comparator may replace one or more parameters, for example:
# return at most 10 rows
mock_dao.RunQuery(StrContains('SELECT'), 10)
or
# Return some non-deterministic number of rows
mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
"""
def equals(self, rhs):
"""Special equals method that all comparators must implement.
Args:
rhs: any python object
"""
raise NotImplementedError('method must be implemented by a subclass.')
def __eq__(self, rhs):
return self.equals(rhs)
def __ne__(self, rhs):
return not self.equals(rhs)
class IsA(Comparator):
"""This class wraps a basic Python type or class. It is used to verify
that a parameter is of the given type or class.
Example:
mock_dao.Connect(IsA(DbConnectInfo))
"""
def __init__(self, class_name):
"""Initialize IsA
Args:
class_name: basic python type or a class
"""
self._class_name = class_name
def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name)
def __repr__(self):
return str(self._class_name)
class IsAlmost(Comparator):
"""Comparison class used to check whether a parameter is nearly equal
to a given value. Generally useful for floating point numbers.
Example mock_dao.SetTimeout((IsAlmost(3.9)))
"""
def __init__(self, float_value, places=7):
"""Initialize IsAlmost.
Args:
float_value: The value for making the comparison.
places: The number of decimal places to round to.
"""
self._float_value = float_value
self._places = places
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False
def __repr__(self):
return str(self._float_value)
class StrContains(Comparator):
"""Comparison class used to check whether a substring exists in a
string parameter. This can be useful in mocking a database with SQL
passed in as a string parameter, for example.
Example:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
"""
def __init__(self, search_string):
"""Initialize.
Args:
# search_string: the string you are searching for
search_string: str
"""
self._search_string = search_string
def equals(self, rhs):
"""Check to see if the search_string is contained in the rhs string.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return rhs.find(self._search_string) > -1
except Exception:
return False
def __repr__(self):
return '<str containing \'%s\'>' % self._search_string
class Regex(Comparator):
"""Checks if a string matches a regular expression.
This uses a given regular expression to determine equality.
"""
def __init__(self, pattern, flags=0):
"""Initialize.
Args:
# pattern is the regular expression to search for
pattern: str
# flags passed to re.compile function as the second argument
flags: int
"""
self.regex = re.compile(pattern, flags=flags)
def equals(self, rhs):
"""Check to see if rhs matches regular expression pattern.
Returns:
bool
"""
return self.regex.search(rhs) is not None
def __repr__(self):
s = '<regular expression \'%s\'' % self.regex.pattern
if self.regex.flags:
s += ', flags=%d' % self.regex.flags
s += '>'
return s
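# Illustrative only (mock_dao, RunQuery and mock_result are assumed, as in the
# Comparator docstring above):
#   mock_dao.RunQuery(Regex(r'^SELECT \* FROM users')).AndReturn(mock_result)
# accepts any query string whose text satisfies the regular expression.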
class In(Comparator):
"""Checks whether an item (or key) is in a list (or dict) parameter.
Example:
mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
"""
def __init__(self, key):
"""Initialize.
Args:
# key is anything that could be in a list or a key in a dict
"""
self._key = key
def equals(self, rhs):
"""Check to see whether key is in rhs.
Args:
rhs: dict
Returns:
bool
"""
return self._key in rhs
def __repr__(self):
return '<sequence or map containing \'%s\'>' % self._key
class ContainsKeyValue(Comparator):
"""Checks whether a key/value pair is in a dict parameter.
Example:
mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: a key in a dict
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False
def __repr__(self):
return '<map containing the entry \'%s: %s\'>' % (self._key, self._value)
class SameElementsAs(Comparator):
"""Checks whether iterables contain the same elements (ignoring order).
Example:
mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki'))
"""
def __init__(self, expected_seq):
"""Initialize.
Args:
expected_seq: a sequence
"""
self._expected_seq = expected_seq
def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual
def __repr__(self):
return '<sequence with same elements as \'%s\'>' % self._expected_seq
class And(Comparator):
"""Evaluates one or more Comparators on RHS and returns an AND of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Comparator
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if not comparator.equals(rhs):
return False
return True
def __repr__(self):
return '<AND %s>' % str(self._comparators)
class Or(Comparator):
"""Evaluates one or more Comparators on RHS and returns an OR of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Mox comparators
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False
def __repr__(self):
return '<OR %s>' % str(self._comparators)
class Func(Comparator):
"""Call a function that should verify the parameter passed in is correct.
You may need the ability to perform more advanced operations on the parameter
in order to validate it. You can use this to have a callable validate any
parameter. The callable should return either True or False.
Example:
def myParamValidator(param):
# Advanced logic here
return True
mock_dao.DoSomething(Func(myParamValidator), true)
"""
def __init__(self, func):
"""Initialize.
Args:
func: callable that takes one parameter and returns a bool
"""
self._func = func
def equals(self, rhs):
"""Test whether rhs passes the function test.
rhs is passed into func.
Args:
rhs: any python object
Returns:
the result of func(rhs)
"""
return self._func(rhs)
def __repr__(self):
return str(self._func)
class IgnoreArg(Comparator):
"""Ignore an argument.
This can be used when we don't care about an argument of a method call.
Example:
# Check if CastMagic is called with 3 as first arg and 'disappear' as third.
mymock.CastMagic(3, IgnoreArg(), 'disappear')
"""
def equals(self, unused_rhs):
"""Ignores arguments and returns True.
Args:
unused_rhs: any python object
Returns:
always returns True
"""
return True
def __repr__(self):
return '<IgnoreArg>'
class MethodGroup(object):
"""Base class containing common behaviour for MethodGroups."""
def __init__(self, group_name):
self._group_name = group_name
def group_name(self):
return self._group_name
def __str__(self):
return '<%s "%s">' % (self.__class__.__name__, self._group_name)
def AddMethod(self, mock_method):
raise NotImplementedError
def MethodCalled(self, mock_method):
raise NotImplementedError
def IsSatisfied(self):
raise NotImplementedError
class UnorderedGroup(MethodGroup):
"""UnorderedGroup holds a set of method calls that may occur in any order.
This construct is helpful for non-deterministic events, such as iterating
over the keys of a dict.
"""
def __init__(self, group_name):
super(UnorderedGroup, self).__init__(group_name)
self._methods = []
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.append(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if there are not any methods in this group."""
return len(self._methods) == 0
class MultipleTimesGroup(MethodGroup):
"""MultipleTimesGroup holds methods that may be called any number of times.
Note: Each method must be called at least once.
This is helpful if you don't know or care how many times a method is called.
"""
def __init__(self, group_name):
super(MultipleTimesGroup, self).__init__(group_name)
self._methods = set()
self._methods_called = set()
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.add(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_called.add(mock_method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
next_method = mock_method._PopNextMethod()
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False
class MoxMetaTestBase(type):
"""Metaclass to add mox cleanup and verification to every test.
As the mox unit testing class is being constructed (MoxTestBase or a
subclass), this metaclass wraps every test function with Mox cleanup code that
runs after the test finishes. This means that
unstubbing and verifying will happen for every test with no additional code,
and any failures will result in test failures as opposed to errors.
"""
def __init__(cls, name, bases, d):
type.__init__(cls, name, bases, d)
# also get all the attributes from the base classes to account
# for a case when test class is not the immediate child of MoxTestBase
for base in bases:
for attr_name in dir(base):
d[attr_name] = getattr(base, attr_name)
for func_name, func in d.items():
if func_name.startswith('test') and callable(func):
setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
@staticmethod
def CleanUpTest(cls, func):
"""Adds Mox cleanup code to any MoxTestBase method.
Always unsets stubs after a test. Will verify all mocks for tests that
otherwise pass.
Args:
cls: MoxTestBase or subclass; the class whose test method we are altering.
func: method; the method of the MoxTestBase test class we wish to alter.
Returns:
The modified method.
"""
def new_method(self, *args, **kwargs):
mox_obj = getattr(self, 'mox', None)
cleanup_mox = False
if mox_obj and isinstance(mox_obj, Mox):
cleanup_mox = True
try:
func(self, *args, **kwargs)
finally:
if cleanup_mox:
mox_obj.UnsetStubs()
if cleanup_mox:
mox_obj.VerifyAll()
new_method.__name__ = func.__name__
new_method.__doc__ = func.__doc__
new_method.__module__ = func.__module__
return new_method
class MoxTestBase(unittest.TestCase):
"""Convenience test class to make stubbing easier.
Sets up a "mox" attribute which is an instance of Mox - any mox tests will
want this. Also automatically unsets any stubs and verifies that all mock
methods have been called at the end of each test, eliminating boilerplate
code.
"""
__metaclass__ = MoxMetaTestBase
def setUp(self):
self.mox = Mox()
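# A minimal end-to-end sketch of the record/replay/verify cycle built from the
# classes above (Mox and VerifyAll appear elsewhere in this module;
# CreateMockAnything and ReplayAll are assumed from the usual mox API; the DAO
# names are illustrative only):
#   m = Mox()
#   mock_dao = m.CreateMockAnything()
#   mock_dao.RunQuery(StrContains('SELECT'), IsA(int)).AndReturn(['row'])
#   m.ReplayAll()
#   assert mock_dao.RunQuery('SELECT * FROM users', 10) == ['row']
#   m.VerifyAll()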
|
mit
|
catiedev/mbed-os
|
tools/export/ds5_5/__init__.py
|
10
|
2065
|
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os.path import basename
from tools.export.exporters import Exporter
class DS5_5(Exporter):
NAME = 'DS5'
TARGETS = [
'LPC1768',
'LPC11U24',
'LPC812',
'UBLOX_C027',
'ARCH_PRO',
'RZ_A1H',
'VK_RZ_A1H',
]
USING_MICROLIB = [
'LPC812',
]
FILE_TYPES = {
'c_sources':'1',
'cpp_sources':'8',
's_sources':'2'
}
TOOLCHAIN = "ARM"
def get_toolchain(self):
return 'uARM' if (self.target in self.USING_MICROLIB) else 'ARM'
def generate(self):
source_files = []
for r_type, n in DS5_5.FILE_TYPES.iteritems():
for file in getattr(self.resources, r_type):
source_files.append({
'name': basename(file), 'type': n, 'path': file
})
ctx = {
'name': self.project_name,
'include_paths': self.resources.inc_dirs,
'scatter_file': self.resources.linker_script,
'object_files': self.resources.objects + self.resources.libraries,
'source_files': source_files,
'symbols': self.toolchain.get_symbols()
}
target = self.target.lower()
# Project file
self.gen_file('ds5_5/%s.project.tmpl' % target, ctx, '.project')
self.gen_file('ds5_5/%s.cproject.tmpl' % target, ctx, '.cproject')
self.gen_file('ds5_5/%s.launch.tmpl' % target, ctx, 'ds5_%s.launch' % target)
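# Illustrative only: for a target of 'LPC1768' the three gen_file calls above
# render ds5_5/lpc1768.project.tmpl, ds5_5/lpc1768.cproject.tmpl and
# ds5_5/lpc1768.launch.tmpl into '.project', '.cproject' and
# 'ds5_lpc1768.launch' in the exported project.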
|
apache-2.0
|
OpenSmalltalk/vm
|
processors/ARM/gdb-8.3.1/gdb/syscalls/arm-linux.py
|
7
|
1759
|
# Copyright (C) 2013-2019 Free Software Foundation, Inc.
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved. This file is offered as-is,
# without any warranty.
import sys
import re
import time
infname = sys.argv[1]
inf = file(infname)
print("""\
<?xml version="1.0"?>
<!-- Copyright (C) 2009-%s Free Software Foundation, Inc.
Copying and distribution of this file, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. This file is offered as-is,
without any warranty. -->
<!DOCTYPE feature SYSTEM "gdb-syscalls.dtd">
<!-- This file was generated using the following file:
%s
The file mentioned above belongs to the Linux Kernel.
Some small hand-edits were made. -->
<syscalls_info>""" % (time.strftime("%Y"), infname))
def record(name, number, comment=None):
#nm = 'name="%s"' % name
#s = ' <syscall %-30s number="%d"/>' % (nm, number)
s = ' <syscall name="%s" number="%d"/>' % (name, number)
if comment:
s += ' <!-- %s -->' % comment
print(s)
for line in inf:
m = re.match(r'^#define __NR_(\w+)\s+\(__NR_SYSCALL_BASE\+\s*(\d+)\)',
line)
if m:
record(m.group(1), int(m.group(2)))
continue
m = re.match(r'^\s+/\* (\d+) was sys_(\w+) \*/$', line)
if m:
record(m.group(2), int(m.group(1)), 'removed')
m = re.match(r'^#define __ARM_NR_(\w+)\s+\(__ARM_NR_BASE\+\s*(\d+)\)',
line)
if m:
record('ARM_'+m.group(1), 0x0f0000+int(m.group(2)))
continue
print('</syscalls_info>')
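# Illustrative only (the sample line is assumed, not taken from a real input
# file): a kernel header line such as
#   #define __NR_restart_syscall (__NR_SYSCALL_BASE+  0)
# matches the first pattern above, so record('restart_syscall', 0) emits
#   <syscall name="restart_syscall" number="0"/>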
|
mit
|
mozilla/olympia
|
src/olympia/lib/tests/test_unicodehelper.py
|
4
|
1311
|
# -*- coding: utf-8 -*-
import os
from olympia.lib import unicodehelper
COMPARISON = 'täst'
def _do_test(path):
"""Performs a test on a JS file"""
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), path)
with open(path, 'rb') as fobj:
text = fobj.read()
utext = unicodehelper.decode(text)
assert utext == COMPARISON
def test_latin1():
"""Tests utf-8 encoding is properly decoded"""
_do_test('resources/unicodehelper/latin_1.txt')
def test_utf8():
"""Tests utf-8 w/o BOM encoding is properly decoded"""
_do_test('resources/unicodehelper/utf-8.txt')
def test_utf8_bom():
"""Tests utf-8 with BOM encoding is properly decoded"""
_do_test('resources/unicodehelper/utf-8-bom.txt')
def test_utf16le():
"""Tests utf-16 Little Endian encoding is properly decoded"""
_do_test('resources/unicodehelper/utf-16le.txt')
def test_utf16be():
"""Tests utf-16 Big Endian encoding is properly decoded"""
_do_test('resources/unicodehelper/utf-16be.txt')
def test_utf32le():
"""Tests utf-32 Little Endian encoding is properly decoded"""
_do_test('resources/unicodehelper/utf-32le.txt')
def test_utf32be():
"""Tests utf-32 Big Endian encoding is properly decoded"""
_do_test('resources/unicodehelper/utf-32be.txt')
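# Illustrative only (assumed behaviour of unicodehelper.decode, consistent with
# the tests above): decode() takes raw bytes in any of the exercised encodings
# and returns the unicode string, e.g. decode(b't\xc3\xa4st') == COMPARISON.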
|
bsd-3-clause
|
cliffano/swaggy-jenkins
|
clients/python/generated/swaggyjenkins/models/github_organization.py
|
1
|
5363
|
# coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
OpenAPI spec version: 1.1.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class GithubOrganization(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'_class': 'str',
'links': 'GithubOrganizationlinks',
'jenkins_organization_pipeline': 'bool',
'name': 'str'
}
attribute_map = {
'_class': '_class',
'links': '_links',
'jenkins_organization_pipeline': 'jenkinsOrganizationPipeline',
'name': 'name'
}
def __init__(self, _class=None, links=None, jenkins_organization_pipeline=None, name=None): # noqa: E501
"""GithubOrganization - a model defined in OpenAPI""" # noqa: E501
self.__class = None
self._links = None
self._jenkins_organization_pipeline = None
self._name = None
self.discriminator = None
if _class is not None:
self._class = _class
if links is not None:
self.links = links
if jenkins_organization_pipeline is not None:
self.jenkins_organization_pipeline = jenkins_organization_pipeline
if name is not None:
self.name = name
@property
def _class(self):
"""Gets the _class of this GithubOrganization. # noqa: E501
:return: The _class of this GithubOrganization. # noqa: E501
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this GithubOrganization.
:param _class: The _class of this GithubOrganization. # noqa: E501
:type: str
"""
self.__class = _class
@property
def links(self):
"""Gets the links of this GithubOrganization. # noqa: E501
:return: The links of this GithubOrganization. # noqa: E501
:rtype: GithubOrganizationlinks
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this GithubOrganization.
:param links: The links of this GithubOrganization. # noqa: E501
:type: GithubOrganizationlinks
"""
self._links = links
@property
def jenkins_organization_pipeline(self):
"""Gets the jenkins_organization_pipeline of this GithubOrganization. # noqa: E501
:return: The jenkins_organization_pipeline of this GithubOrganization. # noqa: E501
:rtype: bool
"""
return self._jenkins_organization_pipeline
@jenkins_organization_pipeline.setter
def jenkins_organization_pipeline(self, jenkins_organization_pipeline):
"""Sets the jenkins_organization_pipeline of this GithubOrganization.
:param jenkins_organization_pipeline: The jenkins_organization_pipeline of this GithubOrganization. # noqa: E501
:type: bool
"""
self._jenkins_organization_pipeline = jenkins_organization_pipeline
@property
def name(self):
"""Gets the name of this GithubOrganization. # noqa: E501
:return: The name of this GithubOrganization. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this GithubOrganization.
:param name: The name of this GithubOrganization. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GithubOrganization):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
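# A minimal usage sketch (illustrative values only; not part of the generated
# module or of the Swaggy Jenkins spec):
if __name__ == '__main__':
    org = GithubOrganization(jenkins_organization_pipeline=True, name='jenkinsci')
    # to_dict() keys mirror openapi_types, so unset attributes come back as None.
    print(org.to_dict())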
|
mit
|
alsrgv/tensorflow
|
tensorflow/python/data/experimental/kernel_tests/shuffle_and_repeat_test.py
|
17
|
5259
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.shuffle_and_repeat()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import shuffle_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ShuffleAndRepeatTest(test_base.DatasetTestBase):
def _build_ds(self, seed, count=5, num_elements=20):
return dataset_ops.Dataset.range(num_elements).apply(
shuffle_ops.shuffle_and_repeat(buffer_size=5, count=count, seed=seed))
def _gen_outputs(self, ds_fn, num_outputs, verify_exhausted=True):
get_next = self.getNext(ds_fn())
outputs = []
for _ in range(num_outputs):
outputs.append(self.evaluate(get_next()))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
return outputs
def testCorrectOutput(self):
output = self._gen_outputs(lambda: self._build_ds(10), 100)
self.assertSequenceEqual(
sorted(output), sorted(
np.array([range(20) for _ in range(5)]).flatten()))
for i in range(5):
self.assertSequenceEqual(sorted(output[i * 20:(i + 1) * 20]), range(20))
def testReshuffling(self):
# Check that the output orders of different epochs are indeed different.
output = self._gen_outputs(lambda: self._build_ds(10), 100)
for i in range(4):
epoch1 = output[i * 20:(i + 1) * 20]
epoch2 = output[(i + 1) * 20:(i + 2) * 20]
self.assertNotEqual(epoch1, epoch2)
def testSameOrderForSameSeeds(self):
output1 = self._gen_outputs(lambda: self._build_ds(10), 100)
output2 = self._gen_outputs(lambda: self._build_ds(10), 100)
self.assertEqual(output1, output2)
def testDifferentOrderForDifferentSeeds(self):
output1 = self._gen_outputs(lambda: self._build_ds(10), 100)
output2 = self._gen_outputs(lambda: self._build_ds(20), 100)
self.assertNotEqual(output1, output2)
self.assertEqual(sorted(output1), sorted(output2))
def testCountNone(self):
output1 = self._gen_outputs(
lambda: self._build_ds(10, count=None), 100, verify_exhausted=False)
output2 = self._gen_outputs(
lambda: self._build_ds(20, count=None), 100, verify_exhausted=False)
self.assertNotEqual(output1, output2)
self.assertEqual(sorted(output1), sorted(output2))
def testCountMinusOne(self):
output1 = self._gen_outputs(
lambda: self._build_ds(10, count=-1), 100, verify_exhausted=False)
output2 = self._gen_outputs(
lambda: self._build_ds(20, count=-1), 100, verify_exhausted=False)
self.assertNotEqual(output1, output2)
self.assertEqual(sorted(output1), sorted(output2))
def testInfiniteOutputs(self):
# Asserting the iterator is exhausted after producing 100 items should fail.
with self.assertRaises(AssertionError):
self._gen_outputs(lambda: self._build_ds(10, count=None), 100)
with self.assertRaises(AssertionError):
self._gen_outputs(lambda: self._build_ds(10, count=-1), 100)
def testInfiniteEmpty(self):
with self.assertRaises(errors.OutOfRangeError):
self._gen_outputs(lambda: self._build_ds(10, count=None, num_elements=0),
100)
with self.assertRaises(errors.OutOfRangeError):
self._gen_outputs(lambda: self._build_ds(10, count=-1, num_elements=0),
100)
def testLargeBufferSize(self):
ds = dataset_ops.Dataset.range(20).apply(
shuffle_ops.shuffle_and_repeat(buffer_size=21))
get_next = self.getNext(ds)
self.evaluate(get_next())
def testVeryLargeBufferSize(self):
num_epochs = 1000 * 1000
# Each element being shuffled and repeated has shape (100,). This will OOM
# or timeout if we actually load everything into the buffer.
ds = dataset_ops.Dataset.range(500).batch(100).apply(
shuffle_ops.shuffle_and_repeat(
buffer_size=5 * num_epochs, count=num_epochs))
# Verify two epochs worth of output.
output = self._gen_outputs(lambda: ds, 2 * 5, verify_exhausted=False)
for i in range(2):
sorted_epoch = sorted(
output[i * 5:(i + 1) * 5], key=lambda batch: batch[0])
self.assertAllEqual(sorted_epoch, np.arange(500).reshape([5, 100]))
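# A usage sketch outside the test harness (assumed, for illustration): the fused
# transform below yields the 10 range elements twice, reshuffled each epoch
# under the given seed, exactly as exercised by _build_ds above.
#   ds = dataset_ops.Dataset.range(10).apply(
#       shuffle_ops.shuffle_and_repeat(buffer_size=10, count=2, seed=42))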
if __name__ == "__main__":
test.main()
|
apache-2.0
|
nzavagli/UnrealPy
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/modeladmin/tests.py
|
12
|
56499
|
from __future__ import unicode_literals
from datetime import date
from django import forms
from django.contrib.admin import BooleanFieldListFilter, SimpleListFilter
from django.contrib.admin.options import (
HORIZONTAL, VERTICAL, ModelAdmin, TabularInline,
)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.validation import ModelAdminValidator
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.core.checks import Error
from django.core.exceptions import ImproperlyConfigured
from django.forms.models import BaseModelFormSet
from django.forms.widgets import Select
from django.test import TestCase, ignore_warnings
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from .models import (
Band, Concert, ValidationTestInlineModel, ValidationTestModel,
)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class ModelAdminTests(TestCase):
def setUp(self):
self.band = Band.objects.create(
name='The Doors',
bio='',
sign_date=date(1965, 1, 1),
)
self.site = AdminSite()
# form/fields/fieldsets interaction ##############################
def test_default_fields(self):
ma = ModelAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request)),
['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request, self.band)),
['name', 'bio', 'sign_date'])
def test_default_fieldsets(self):
# fieldsets_add and fieldsets_change should return a special data structure that
# is used in the templates. They should generate the "right thing" whether we
# have specified a custom form, the fields argument, or nothing at all.
#
# Here's the default case. There are no custom form_add/form_change methods,
# no fields argument, and no fieldsets argument.
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
def test_get_fieldsets(self):
# Test that get_fieldsets is called when figuring out form fields.
# Refs #18681.
class BandAdmin(ModelAdmin):
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['name', 'bio']})]
ma = BandAdmin(Band, self.site)
form = ma.get_form(None)
self.assertEqual(form._meta.fields, ['name', 'bio'])
class InlineBandAdmin(TabularInline):
model = Concert
fk_name = 'main_band'
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['day', 'transport']})]
ma = InlineBandAdmin(Band, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['day', 'transport'])
def test_lookup_allowed_allows_nonexistent_lookup(self):
"""
Ensure that a lookup_allowed allows a parameter
whose field lookup doesn't exist.
Refs #21129.
"""
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertTrue(ma.lookup_allowed('name__nonexistent', 'test_value'))
def test_field_arguments(self):
# If we specify the fields argument, fieldsets_add and fieldsets_change should
# just stick the fields into a fieldsets structure and return it.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_fields(request)), ['name'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name'])
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name']})])
def test_field_arguments_restricted_on_form(self):
# If we specify fields or fieldsets, it should exclude fields on the Form class
# to the fields specified. This may cause errors to be raised in the db layer if
# required model fields aren't in fields/fieldsets, but that's preferable to
# ghost errors where you have a field in your Form class that isn't being
# displayed because you forgot to add it to fields/fieldsets
# Using `fields`.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields),
['name'])
# Using `fieldsets`.
class BandAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name']})]
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields),
['name'])
# Using `exclude`.
class BandAdmin(ModelAdmin):
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
# You can also pass a tuple to `exclude`.
class BandAdmin(ModelAdmin):
exclude = ('bio',)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
# Using `fields` and `exclude`.
class BandAdmin(ModelAdmin):
fields = ['name', 'bio']
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name'])
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `ModelAdmin.readonly_fields` and when no
`ModelAdmin.exclude` is defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
readonly_fields = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
readonly_fields = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'id', 'DELETE'])
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is overridden if
`ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
exclude = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['bio', 'sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'day', 'id', 'DELETE'])
def test_custom_form_validation(self):
# If we specify a form, it should use it allowing custom validation to work
# properly. This won't, however, break any of the admin widgets or media.
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'bio', 'sign_date', 'delete'])
self.assertEqual(
type(ma.get_form(request).base_fields['sign_date'].widget),
AdminDateWidget)
def test_form_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `ModelAdmin.get_form()`
overrides all other declarations. Refs #8999.
"""
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['name']
class BandAdmin(ModelAdmin):
exclude = ['sign_date']
form = AdminBandForm
def get_form(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['bio']
return super(BandAdmin, self).get_form(request, obj, **kwargs)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
def test_formset_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `InlineModelAdmin.get_formset()`
overrides all other declarations. Refs #8999.
"""
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_formset(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['opening_band']
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE'])
def test_queryset_override(self):
# If we need to override the queryset of a ModelChoiceField in our custom form
# make sure that RelatedFieldWidgetWrapper doesn't mess that up.
band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
band2.save()
class ConcertAdmin(ModelAdmin):
pass
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band">'
'<option value="" selected="selected">---------</option>'
'<option value="%d">The Beatles</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % (band2.id, self.band.id))
class AdminConcertForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AdminConcertForm, self).__init__(*args, **kwargs)
self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
class ConcertAdminWithForm(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdminWithForm(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band">'
'<option value="" selected="selected">---------</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % self.band.id)
def test_regression_for_ticket_15820(self):
"""
Ensure that `obj` is passed from `InlineModelAdmin.get_fieldsets()` to
`InlineModelAdmin.get_formset()`.
"""
class CustomConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['day']
class ConcertInline(TabularInline):
model = Concert
fk_name = 'main_band'
def get_formset(self, request, obj=None, **kwargs):
if obj:
kwargs['form'] = CustomConcertForm
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
Concert.objects.create(main_band=self.band, opening_band=self.band, day=1)
ma = BandAdmin(Band, self.site)
inline_instances = ma.get_inline_instances(request)
fieldsets = list(inline_instances[0].get_fieldsets(request))
self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport'])
fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model))
self.assertEqual(fieldsets[0][1]['fields'], ['day'])
# radio_fields behavior ###########################################
def test_default_foreign_key_widget(self):
# First, without any radio_fields specified, the widgets for ForeignKey
# and fields with choices specified ought to be a basic Select widget.
# ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
# they need to be handled properly when type checking. For Select fields, all of
# the choices lists have a first entry of dashes.
cma = ModelAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
Select)
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[('', '---------'), (1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
Select)
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
def test_foreign_key_as_radio_field(self):
# Now specify all the fields as radio_fields. Widgets should now be
# RadioSelect, and the choices list should have a first entry of 'None' if
# blank=True for the model field. Finally, the widget should have the
# 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
class ConcertAdmin(ModelAdmin):
radio_fields = {
'main_band': HORIZONTAL,
'opening_band': VERTICAL,
'day': VERTICAL,
'transport': HORIZONTAL,
}
cma = ConcertAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['main_band'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['main_band'].widget.choices),
[(self.band.id, 'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', 'None'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['day'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[(1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['transport'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['transport'].widget.choices),
[('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ('transport',)
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['main_band', 'opening_band', 'day'])
class AdminConcertForm(forms.ModelForm):
extra = forms.CharField()
class Meta:
model = Concert
fields = ['extra', 'transport']
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['extra', 'transport'])
class ConcertInline(TabularInline):
form = AdminConcertForm
model = Concert
fk_name = 'main_band'
can_delete = True
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['extra', 'transport', 'id', 'DELETE', 'main_band'])
class CheckTestCase(TestCase):
def assertIsInvalid(self, model_admin, model, msg,
id=None, hint=None, invalid_obj=None):
invalid_obj = invalid_obj or model_admin
errors = model_admin.check(model=model)
expected = [
Error(
msg,
hint=hint,
obj=invalid_obj,
id=id,
)
]
self.assertEqual(errors, expected)
def assertIsInvalidRegexp(self, model_admin, model, msg,
id=None, hint=None, invalid_obj=None):
"""
Same as assertIsInvalid but treats the given msg as a regexp.
"""
invalid_obj = invalid_obj or model_admin
errors = model_admin.check(model=model)
self.assertEqual(len(errors), 1)
error = errors[0]
self.assertEqual(error.hint, hint)
self.assertEqual(error.obj, invalid_obj)
self.assertEqual(error.id, id)
six.assertRegex(self, error.msg, msg)
def assertIsValid(self, model_admin, model):
errors = model_admin.check(model=model)
expected = []
self.assertEqual(errors, expected)
class RawIdCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields' must be a list or tuple.",
'admin.E001')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'raw_id_fields[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E002')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields[0]' must be a ForeignKey or ManyToManyField.",
'admin.E003')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('users',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FieldsetsCheckTests(CheckTestCase):
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets' must be a list or tuple.",
'admin.E007')
def test_non_iterable_item(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ({},)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be a list or tuple.",
'admin.E008')
def test_item_not_a_pair(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ((),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be of length 2.",
'admin.E009')
def test_second_element_of_item_not_a_dict(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", ()),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must be a dictionary.",
'admin.E010')
def test_missing_fields_key(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {}),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must contain the key 'fields'.",
'admin.E011')
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_specified_both_fields_and_fieldsets(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
fields = ['name']
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"Both 'fieldsets' and 'fields' are specified.",
'admin.E005')
def test_duplicate_fields(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name', 'name']})]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"There are duplicate field(s) in 'fieldsets[0][1]'.",
'admin.E012')
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
self.assertIsValid(BandAdmin, Band)
class FieldsCheckTests(CheckTestCase):
def test_duplicate_fields_in_fields(self):
class ValidationTestModelAdmin(ModelAdmin):
fields = ['name', 'name']
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fields' contains duplicate field(s).",
'admin.E006')
def test_inline(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = 10
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fields' must be a list or tuple.",
'admin.E004',
invalid_obj=ValidationTestInline)
class FormCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeForm(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
form = FakeForm
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'form' must inherit from 'BaseModelForm'.",
'admin.E016')
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
self.assertIsValid(BandAdmin, Band)
def test_valid_case(self):
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('name', 'bio', 'sign_date', 'delete')
}),
)
self.assertIsValid(BandAdmin, Band)
class FilterVerticalCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical' must be a list or tuple.",
'admin.E017')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'filter_vertical[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E019')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical[0]' must be a ManyToManyField.",
'admin.E020')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("users",)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FilterHorizontalCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal' must be a list or tuple.",
'admin.E018')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'filter_horizontal[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E019')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal[0]' must be a ManyToManyField.",
'admin.E020')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("users",)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class RadioFieldsCheckTests(CheckTestCase):
def test_not_dictionary(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = ()
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'radio_fields' must be a dictionary.",
'admin.E021')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {'non_existent_field': VERTICAL}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'radio_fields' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E022')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {'name': VERTICAL}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'radio_fields' refers to 'name', which is not an instance "
"of ForeignKey, and does not have a 'choices' definition."),
'admin.E023')
def test_invalid_value(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": None}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'radio_fields[\"state\"]' must be either admin.HORIZONTAL or admin.VERTICAL.",
'admin.E024')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": VERTICAL}
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class PrepopulatedFieldsCheckTests(CheckTestCase):
def test_not_dictionary(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = ()
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'prepopulated_fields' must be a dictionary.",
'admin.E026')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {'non_existent_field': ("slug",)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E027')
def test_missing_field_again(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ('non_existent_field',)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields[\"slug\"][0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E030')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"users": ('name',)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields' refers to 'users', which must not be "
"a DateTimeField, ForeignKey or ManyToManyField."),
'admin.E028')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ('name',)}
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display' must be a list or tuple.",
'admin.E107')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'list_display[0]' refers to 'non_existent_field', which is not a callable, an attribute "
"of 'ValidationTestModelAdmin', or an attribute or method on 'modeladmin.ValidationTestModel'."),
'admin.E108')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('users',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display[0]' must not be a ManyToManyField.",
'admin.E109')
def test_valid_case(self):
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayLinksCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display_links' must be a list, a tuple, or None.",
'admin.E110')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display_links[0]' refers to 'non_existent_field', which is not defined in 'list_display'.",
'admin.E111')
def test_missing_in_list_display(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display_links[0]' refers to 'name', which is not defined in 'list_display'.",
'admin.E111')
def test_valid_case(self):
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_None_is_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = None
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListFilterTests(CheckTestCase):
def test_list_filter_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter' must be a list or tuple.",
'admin.E112')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' refers to 'non_existent_field', which does not refer to a Field.",
'admin.E116')
def test_not_filter(self):
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (RandomClass,)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must inherit from 'ListFilter'.",
'admin.E113')
def test_not_filter_again(self):
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', RandomClass),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115')
def test_not_filter_again_again(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_queryset(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', AwesomeFilter),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115')
def test_not_associated_with_field_name(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (BooleanFieldListFilter,)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must not inherit from 'FieldListFilter'.",
'admin.E114')
def test_valid_case(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_queryset(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('is_active', AwesomeFilter, ('is_active', BooleanFieldListFilter), 'no')
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListPerPageCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 'hello'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_per_page' must be an integer.",
'admin.E118')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 100
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListMaxShowAllCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 'hello'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_max_show_all' must be an integer.",
'admin.E119')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 200
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SearchFieldsCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
search_fields = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'search_fields' must be a list or tuple.",
'admin.E126')
class DateHierarchyCheckTests(CheckTestCase):
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'non_existent_field'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'date_hierarchy' refers to 'non_existent_field', which "
"is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E127')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'name'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'date_hierarchy' must be a DateField or DateTimeField.",
'admin.E128')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'pub_date'
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class OrderingCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering' must be a list or tuple.",
'admin.E031')
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin,
ValidationTestModel,
"The value of 'ordering[0]' refers to 'non_existent_field', which is not an attribute of 'modeladmin.ValidationTestModel'.",
'admin.E033',
)
def test_random_marker_not_alone(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?', 'name')
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'ordering' has the random ordering marker '?', but contains "
"other fields as well."),
'admin.E032',
hint='Either remove the "?", or remove the other fields.')
def test_valid_random_marker_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_valid_complex_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('band__name',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('name',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListSelectRelatedCheckTests(CheckTestCase):
def test_invalid_type(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = 1
self.assertIsInvalid(ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_select_related' must be a boolean, tuple or list.",
'admin.E117')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = False
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveAsCheckTests(CheckTestCase):
def test_not_boolean(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'save_as' must be a boolean.",
'admin.E101')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = True
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveOnTopCheckTests(CheckTestCase):
def test_not_boolean(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'save_on_top' must be a boolean.",
'admin.E102')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = True
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class InlinesCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
inlines = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'inlines' must be a list or tuple.",
'admin.E103')
def test_not_model_admin(self):
class ValidationTestInline(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must inherit from 'BaseModelAdmin'\.",
'admin.E104')
def test_missing_model_field(self):
class ValidationTestInline(TabularInline):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must have a 'model' attribute\.",
'admin.E105')
def test_invalid_model_type(self):
""" Test if `model` attribute on inline model admin is a models.Model.
"""
class SomethingBad(object):
pass
class ValidationTestInline(TabularInline):
model = SomethingBad
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"The value of '.*\.ValidationTestInline.model' must be a Model\.",
'admin.E106')
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FkNameCheckTests(CheckTestCase):
def test_missing_field(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = 'non_existent_field'
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"'modeladmin.ValidationTestInlineModel' has no field named 'non_existent_field'.",
'admin.E202',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "parent"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ExtraCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'extra' must be an integer.",
'admin.E203',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MaxNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'max_num' must be an integer.",
'admin.E204',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MinNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'min_num' must be an integer.",
'admin.E205',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FormsetCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeFormSet(object):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = FakeFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'formset' must inherit from 'BaseModelFormSet'.",
'admin.E206',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class RealModelFormSet(BaseModelFormSet):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = RealModelFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class CustomModelAdminTests(CheckTestCase):
@ignore_warnings(category=RemovedInDjango19Warning)
def test_deprecation(self):
"Deprecated Custom Validator definitions still work with the check framework."
class CustomValidator(ModelAdminValidator):
def validate_me(self, model_admin, model):
raise ImproperlyConfigured('error!')
class CustomModelAdmin(ModelAdmin):
validator_class = CustomValidator
self.assertIsInvalid(CustomModelAdmin, ValidationTestModel, 'error!')
class ListDisplayEditableTests(CheckTestCase):
def test_list_display_links_is_none(self):
"""
list_display and list_editable can contain the same values
when list_display_links is None
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = list_display
list_display_links = None
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_same_as_list_editable(self):
"""
The first item in list_display can be the same as the first
in list_editable
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['name', 'slug']
list_display_links = ['pub_date']
self.assertIsValid(ProductAdmin, ValidationTestModel)
class ModelAdminPermissionTests(TestCase):
class MockUser(object):
def has_module_perms(self, app_label):
if app_label == "modeladmin":
return True
return False
class MockAddUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.add_band":
return True
return False
class MockChangeUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.change_band":
return True
return False
class MockDeleteUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.delete_band":
return True
return False
def test_has_add_permission(self):
"""
Ensure that has_add_permission returns True for users who can add
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_add_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_add_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_add_permission(request))
def test_has_change_permission(self):
"""
Ensure that has_change_permission returns True for users who can edit
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_change_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_change_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_change_permission(request))
def test_has_delete_permission(self):
"""
Ensure that has_delete_permission returns True for users who can delete
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_delete_permission(request))
def test_has_module_permission(self):
"""
Ensure that has_module_permission returns True for users who have any
permission for the module and False for users who don't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_module_permission(request))
original_app_label = ma.opts.app_label
ma.opts.app_label = 'anotherapp'
try:
request.user = self.MockAddUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_module_permission(request))
finally:
ma.opts.app_label = original_app_label
|
mit
|
luiseduardohdbackup/odoo
|
addons/document/document.py
|
152
|
83929
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import errno
import logging
import os
import random
import shutil
import string
import time
from StringIO import StringIO
import psycopg2
import openerp
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.osv.orm import except_orm
import openerp.report.interface
from openerp.tools.misc import ustr
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
from content_index import cntIndex
_logger = logging.getLogger(__name__)
class document_file(osv.osv):
_inherit = 'ir.attachment'
_columns = {
# Columns from ir.attachment:
'write_date': fields.datetime('Date Modified', readonly=True),
'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
# Fields of document:
'user_id': fields.many2one('res.users', 'Owner', select=1),
'parent_id': fields.many2one('document.directory', 'Directory', select=1, change_default=True),
'index_content': fields.text('Indexed Content'),
'partner_id':fields.many2one('res.partner', 'Partner', select=1),
'file_type': fields.char('Content Type'),
}
_order = "id desc"
_defaults = {
'user_id': lambda self, cr, uid, ctx:uid,
}
_sql_constraints = [
('filename_unique', 'unique (name,parent_id)', 'The filename must be unique in a directory !'),
]
def check(self, cr, uid, ids, mode, context=None, values=None):
"""Overwrite check to verify access on directory to validate specifications of doc/access_permissions.rst"""
if not isinstance(ids, list):
ids = [ids]
super(document_file, self).check(cr, uid, ids, mode, context=context, values=values)
if ids:
# use SQL to avoid recursive loop on read
cr.execute('SELECT DISTINCT parent_id from ir_attachment WHERE id in %s AND parent_id is not NULL', (tuple(ids),))
parent_ids = [parent_id for (parent_id,) in cr.fetchall()]
if parent_ids:
self.pool.get('ir.model.access').check(cr, uid, 'document.directory', mode)
self.pool.get('document.directory').check_access_rule(cr, uid, parent_ids, mode, context=context)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
# Grab ids, bypassing 'count'
ids = super(document_file, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=False)
if not ids:
return 0 if count else []
# Filter out documents that are in directories that the user is not allowed to read.
# Must use pure SQL to avoid access rules exceptions (we want to remove the records,
# not fail), and the records have been filtered in parent's search() anyway.
cr.execute('SELECT id, parent_id from ir_attachment WHERE id in %s', (tuple(ids),))
# build a dict of parent_id -> list of attachment ids
parents = {}
for attach_id, attach_parent in cr.fetchall():
parents.setdefault(attach_parent, []).append(attach_id)
parent_ids = parents.keys()
# filter parents
visible_parent_ids = self.pool.get('document.directory').search(cr, uid, [('id', 'in', list(parent_ids))])
# attachments with no parent are always allowed
orig_ids = ids # save the ids, to keep order
ids = parents.get(None,[])
for parent_id in visible_parent_ids:
ids.extend(parents[parent_id])
# sort result according to the original sort ordering
if count:
return len(ids)
else:
set_ids = set(ids)
return [id for id in orig_ids if id in set_ids]
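# An illustrative walk-through of the filtering above (made-up ids, not from
# the source): if the superclass search returned orig_ids = [10, 11, 12], the
# SQL query produced parents = {None: [11], 5: [10], 6: [12]}, and only
# directory 5 is visible to the user, then ids becomes [11, 10] and the final,
# re-ordered result is [10, 11]; attachment 12 is dropped because its
# directory (6) failed the visibility search.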
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
if 'name' not in default:
name = self.read(cr, uid, [id], ['name'])[0]['name']
default.update(name=_("%s (copy)") % (name))
return super(document_file, self).copy(cr, uid, id, default, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
vals['parent_id'] = context.get('parent_id', False) or vals.get('parent_id', False)
# take partner from uid
if vals.get('res_id', False) and vals.get('res_model', False) and not vals.get('partner_id', False):
vals['partner_id'] = self.__get_partner_id(cr, uid, vals['res_model'], vals['res_id'], context)
if vals.get('datas', False):
vals['file_type'], vals['index_content'] = self._index(cr, uid, vals['datas'].decode('base64'), vals.get('datas_fname', False), vals.get('file_type', None))
return super(document_file, self).create(cr, uid, vals, context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if vals.get('datas', False):
vals['file_type'], vals['index_content'] = self._index(cr, uid, vals['datas'].decode('base64'), vals.get('datas_fname', False), vals.get('file_type', None))
return super(document_file, self).write(cr, uid, ids, vals, context)
def _index(self, cr, uid, data, datas_fname, file_type):
mime, icont = cntIndex.doIndex(data, datas_fname, file_type or None, None)
icont_u = ustr(icont)
return mime, icont_u
def __get_partner_id(self, cr, uid, res_model, res_id, context=None):
""" A helper to retrieve the associated partner from any res_model+id
It is a hack that will try to discover if the mentioned record is
clearly associated with a partner record.
"""
obj_model = self.pool[res_model]
if obj_model._name == 'res.partner':
return res_id
elif 'partner_id' in obj_model._columns and obj_model._columns['partner_id']._obj == 'res.partner':
bro = obj_model.browse(cr, uid, res_id, context=context)
return bro.partner_id.id
return False
class document_directory(osv.osv):
_name = 'document.directory'
_description = 'Directory'
_order = 'name'
_columns = {
'name': fields.char('Name', required=True, select=1),
'write_date': fields.datetime('Date Modified', readonly=True),
'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
'create_date': fields.datetime('Date Created', readonly=True),
'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
'user_id': fields.many2one('res.users', 'Owner'),
'group_ids': fields.many2many('res.groups', 'document_directory_group_rel', 'item_id', 'group_id', 'Groups'),
'parent_id': fields.many2one('document.directory', 'Parent Directory', select=1, change_default=True),
'child_ids': fields.one2many('document.directory', 'parent_id', 'Children'),
'file_ids': fields.one2many('ir.attachment', 'parent_id', 'Files'),
'content_ids': fields.one2many('document.directory.content', 'directory_id', 'Virtual Files'),
'type': fields.selection([ ('directory','Static Directory'), ('ressource','Folders per resource'), ],
'Type', required=True, select=1, change_default=True,
help="Each directory can either have the type Static or be linked to another resource. A static directory, as with Operating Systems, is the classic directory that can contain a set of files. The directories linked to systems resources automatically possess sub-directories for each of resource types defined in the parent directory."),
'domain': fields.char('Domain', help="Use a domain if you want to apply an automatic filter on visible resources."),
'ressource_type_id': fields.many2one('ir.model', 'Resource model', change_default=True,
help="Select an object here and there will be one folder per record of that resource."),
'resource_field': fields.many2one('ir.model.fields', 'Name field', help='Field to be used as name on resource directories. If empty, the "name" will be used.'),
'resource_find_all': fields.boolean('Find all resources',
help="If true, all attachments that match this resource will " \
" be located. If false, only ones that have this as parent." ),
'ressource_parent_type_id': fields.many2one('ir.model', 'Parent Model', change_default=True,
help="If you put an object here, this directory template will appear bellow all of these objects. " \
"Such directories are \"attached\" to the specific model or record, just like attachments. " \
"Don't put a parent directory if you select a parent model."),
'ressource_id': fields.integer('Resource ID',
help="Along with Parent Model, this ID attaches this folder to a specific record of Parent Model."),
'ressource_tree': fields.boolean('Tree Structure',
help="Check this if you want to use the same tree structure as the object selected in the system."),
'dctx_ids': fields.one2many('document.directory.dctx', 'dir_id', 'Context fields'),
'company_id': fields.many2one('res.company', 'Company', change_default=True),
}
_defaults = {
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'document.directory', context=c),
'user_id': lambda self,cr,uid,ctx: uid,
'domain': '[]',
'type': 'directory',
'ressource_id': 0,
'resource_find_all': True,
}
_sql_constraints = [
('dirname_uniq', 'unique (name,parent_id,ressource_id,ressource_parent_type_id)', 'The directory name must be unique !'),
('no_selfparent', 'check(parent_id <> id)', 'Directory cannot be parent of itself!'),
]
def name_get(self, cr, uid, ids, context=None):
res = []
if not self.search(cr,uid,[('id','in',ids)]):
ids = []
for d in self.browse(cr, uid, ids, context=context):
s = ''
d2 = d
while d2 and d2.parent_id:
s = d2.name + (s and ('/' + s) or '')
d2 = d2.parent_id
res.append((d.id, s or d.name))
return res
def get_full_path(self, cr, uid, dir_id, context=None):
""" Return the full path to this directory, in a list, root first
"""
if isinstance(dir_id, (tuple, list)):
assert len(dir_id) == 1
dir_id = dir_id[0]
def _parent(dir_id, path):
parent=self.browse(cr, uid, dir_id)
if parent.parent_id and not parent.ressource_parent_type_id:
_parent(parent.parent_id.id,path)
path.append(parent.name)
else:
path.append(parent.name)
return path
path = []
_parent(dir_id, path)
return path
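# For example (hypothetical tree, not from the source): a directory 'Q1'
# under 'Sales', itself under a top-level 'Documents', yields
# ['Documents', 'Sales', 'Q1'] -- root first, because _parent() recurses into
# the parent before appending the current directory's name.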
_constraints = [
(osv.osv._check_recursion, 'Error! You cannot create recursive directories.', ['parent_id'])
]
def onchange_content_id(self, cr, uid, ids, ressource_type_id):
return {}
def get_object(self, cr, uid, uri, context=None):
""" Return a node object for the given uri.
This fn merely passes the call to node_context
"""
return get_node_context(cr, uid, context).get_uri(cr, uri)
def get_node_class(self, cr, uid, ids, dbro=None, dynamic=False, context=None):
"""Retrieve the class of nodes for this directory
This function can be overridden by inherited classes ;)
@param dbro The browse object, if caller already has it
"""
if dbro is None:
dbro = self.browse(cr, uid, ids, context=context)
if dynamic:
return node_res_obj
elif dbro.type == 'directory':
return node_dir
elif dbro.type == 'ressource':
return node_res_dir
else:
raise ValueError("dir node for %s type.", dbro.type)
def _prepare_context(self, cr, uid, nctx, context=None):
""" Fill nctx with properties for this database
@param nctx instance of nodes.node_context, to be filled
@param context ORM context (dict) for us
Note that this function is called *without* a list of ids,
it should behave the same for the whole database (based on the
ORM instance of document.directory).
Some databases may override this and attach properties to the
node_context. See WebDAV, CalDAV.
"""
return
def get_dir_permissions(self, cr, uid, ids, context=None):
"""Check what permission user 'uid' has on directory 'id'
"""
assert len(ids) == 1
res = 0
for pperms in [('read', 5), ('write', 2), ('unlink', 8)]:
try:
self.check_access_rule(cr, uid, ids, pperms[0], context=context)
res |= pperms[1]
except except_orm:
pass
return res
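# Sketch of the resulting bitmask (illustrative): every permission that passes
# check_access_rule() contributes its bits, so a user allowed to read, write
# and unlink gets 5 | 2 | 8 = 15, while a read-only user gets 5 (read +
# descend, in the unix-like convention used by node_class.check_perms below).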
def _locate_child(self, cr, uid, root_id, uri, nparent, ncontext):
""" try to locate the node in uri,
Return a tuple (node_dir, remaining_path)
"""
return (node_database(context=ncontext), uri)
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default ={}
name = self.read(cr, uid, [id])[0]['name']
default.update(name=_("%s (copy)") % (name))
return super(document_directory,self).copy(cr, uid, id, default, context=context)
def _check_duplication(self, cr, uid, vals, ids=None, op='create'):
name=vals.get('name',False)
parent_id=vals.get('parent_id',False)
ressource_parent_type_id=vals.get('ressource_parent_type_id',False)
ressource_id=vals.get('ressource_id',0)
if op=='write':
for directory in self.browse(cr, SUPERUSER_ID, ids):
if not name:
name=directory.name
if not parent_id:
parent_id=directory.parent_id and directory.parent_id.id or False
# TODO fix algo
if not ressource_parent_type_id:
ressource_parent_type_id=directory.ressource_parent_type_id and directory.ressource_parent_type_id.id or False
if not ressource_id:
ressource_id=directory.ressource_id and directory.ressource_id or 0
res=self.search(cr,uid,[('id','<>',directory.id),('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
if len(res):
return False
if op=='create':
res = self.search(cr, SUPERUSER_ID, [('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
if len(res):
return False
return True
def write(self, cr, uid, ids, vals, context=None):
if not self._check_duplication(cr, uid, vals, ids, op='write'):
raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
return super(document_directory,self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
if not self._check_duplication(cr, uid, vals):
raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
newname = vals.get('name',False)
if newname:
for illeg in ('/', '@', '$', '#'):
if illeg in newname:
raise osv.except_osv(_('ValidateError'), _('Directory name contains special characters!'))
return super(document_directory,self).create(cr, uid, vals, context)
class document_directory_dctx(osv.osv):
""" In order to evaluate dynamic folders, child items could have a limiting
domain expression. For that, their parents will export a context where useful
information will be passed on.
If you define something like "s_id" = "this.id" at a folder iterating over sales, its
children could have a domain like [('sale_id', '=', s_id)]
This system should be used recursively, that is, parent dynamic context will be
appended to all children down the tree.
"""
_name = 'document.directory.dctx'
_description = 'Directory Dynamic Context'
_columns = {
'dir_id': fields.many2one('document.directory', 'Directory', required=True, ondelete="cascade"),
'field': fields.char('Field', required=True, select=1, help="The name of the field."),
'expr': fields.char('Expression', required=True, help="A python expression used to evaluate the field.\n" + \
"You can use 'dir_id' for current dir, 'res_id', 'res_model' as a reference to the current record, in dynamic folders"),
}
class document_directory_content_type(osv.osv):
_name = 'document.directory.content.type'
_description = 'Directory Content Type'
_columns = {
'name': fields.char('Content Type', required=True),
'code': fields.char('Extension', size=4),
'active': fields.boolean('Active'),
'mimetype': fields.char('Mime Type')
}
_defaults = {
'active': lambda *args: 1
}
class document_directory_content(osv.osv):
_name = 'document.directory.content'
_description = 'Directory Content'
_order = "sequence"
def _extension_get(self, cr, uid, context=None):
cr.execute('select code,name from document_directory_content_type where active')
res = cr.fetchall()
return res
_columns = {
'name': fields.char('Content Name', required=True),
'sequence': fields.integer('Sequence', size=16),
'prefix': fields.char('Prefix', size=16),
'suffix': fields.char('Suffix', size=16),
'report_id': fields.many2one('ir.actions.report.xml', 'Report'),
'extension': fields.selection(_extension_get, 'Document Type', required=True, size=4),
'include_name': fields.boolean('Include Record Name',
help="Check this field if you want that the name of the file to contain the record name." \
"\nIf set, the directory will have to be a resource one."),
'directory_id': fields.many2one('document.directory', 'Directory'),
}
_defaults = {
'extension': lambda *args: '.pdf',
'sequence': lambda *args: 1,
'include_name': lambda *args: 1,
}
def _file_get(self, cr, node, nodename, content, context=None):
""" return the nodes of a <node> parent having a <content> content
The return value MUST be false or a list of node_class objects.
"""
# TODO: respect the context!
model = node.res_model
if content.include_name and not model:
return False
res2 = []
tname = ''
if content.include_name:
record_name = node.displayname or ''
if record_name:
tname = (content.prefix or '') + record_name + (content.suffix or '') + (content.extension or '')
else:
tname = (content.prefix or '') + (content.name or '') + (content.suffix or '') + (content.extension or '')
if '/' in tname:
tname = tname.replace('/', '_')
act_id = False
if 'dctx_res_id' in node.dctx:
act_id = node.dctx['res_id']
elif hasattr(node, 'res_id'):
act_id = node.res_id
else:
act_id = node.context.context.get('res_id',False)
if not nodename:
n = node_content(tname, node, node.context,content, act_id=act_id)
res2.append( n)
else:
if nodename == tname:
n = node_content(tname, node, node.context,content, act_id=act_id)
n.fill_fields(cr)
res2.append(n)
return res2
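# Sketch of the virtual name built above (hypothetical content definition):
# with prefix='SO_', suffix='_print' and extension='.pdf', a record whose
# display name is 'SO042' yields the virtual file 'SO_SO042_print.pdf'; any
# '/' in the result is replaced by '_' so the name stays a single path
# component.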
def process_write(self, cr, uid, node, data, context=None):
if node.extension != '.pdf':
raise Exception("Invalid content: %s" % node.extension)
return True
def process_read(self, cr, uid, node, context=None):
if node.extension != '.pdf':
raise Exception("Invalid content: %s" % node.extension)
report = self.pool.get('ir.actions.report.xml').browse(cr, uid, node.report_id, context=context)
srv = openerp.report.interface.report_int._reports['report.'+report.report_name]
ctx = node.context.context.copy()
ctx.update(node.dctx)
pdf,pdftype = srv.create(cr, uid, [node.act_id,], {}, context=ctx)
return pdf
class ir_action_report_xml(osv.osv):
_name="ir.actions.report.xml"
_inherit ="ir.actions.report.xml"
def _model_get(self, cr, uid, ids, name, arg, context=None):
res = {}
model_pool = self.pool.get('ir.model')
for data in self.read(cr, uid, ids, ['model']):
model = data.get('model',False)
if model:
model_id =model_pool.search(cr, uid, [('model','=',model)])
if model_id:
res[data.get('id')] = model_id[0]
else:
res[data.get('id')] = False
return res
def _model_search(self, cr, uid, obj, name, args, context=None):
if not len(args):
return []
assert len(args) == 1 and args[0][1] == '=', 'expression is not what we expect: %r' % args
model_id= args[0][2]
if not model_id:
# a deviation from standard behavior: when searching model_id = False
# we return *all* reports, not just ones with empty model.
# One reason is that 'model' is a required field so far
return []
model = self.pool.get('ir.model').read(cr, uid, [model_id])[0]['model']
report_id = self.search(cr, uid, [('model','=',model)])
if not report_id:
return [('id','=','0')]
return [('id','in',report_id)]
_columns={
'model_id' : fields.function(_model_get, fnct_search=_model_search, string='Model Id'),
}
class document_storage(osv.osv):
""" The primary object for data storage. Deprecated. """
_name = 'document.storage'
_description = 'Storage Media'
def get_data(self, cr, uid, id, file_node, context=None, fil_obj=None):
""" retrieve the contents of some file_node having storage_id = id
optionally, fil_obj could point to the browse object of the file
(ir.attachment)
"""
boo = self.browse(cr, uid, id, context=context)
if fil_obj:
ira = fil_obj
else:
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
data = ira.datas
if data:
out = data.decode('base64')
else:
out = ''
return out
def get_file(self, cr, uid, id, file_node, mode, context=None):
""" Return a file-like object for the contents of some node
"""
if context is None:
context = {}
boo = self.browse(cr, uid, id, context=context)
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
return nodefd_db(file_node, ira_browse=ira, mode=mode)
def set_data(self, cr, uid, id, file_node, data, context=None, fil_obj=None):
""" store the data.
This function MUST be used from an ir.attachment. It wouldn't make sense
to store things persistently for other types (dynamic).
"""
boo = self.browse(cr, uid, id, context=context)
if fil_obj:
ira = fil_obj
else:
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
_logger.debug( "Store data for ir.attachment #%d." % ira.id)
store_fname = None
fname = None
filesize = len(data)
self.pool.get('ir.attachment').write(cr, uid, [file_node.file_id], {'datas': data.encode('base64')}, context=context)
# 2nd phase: store the metadata
try:
icont = ''
mime = ira.file_type
if not mime:
mime = ""
try:
mime, icont = cntIndex.doIndex(data, ira.datas_fname, ira.file_type or None, fname)
except Exception:
_logger.debug('Cannot index file.', exc_info=True)
pass
try:
icont_u = ustr(icont)
except UnicodeError:
icont_u = ''
# a hack: /assume/ that the calling write operation will not try
# to write the fname and size, and update them in the db concurrently.
# We cannot use a write() here, because we are already in one.
cr.execute('UPDATE ir_attachment SET file_size = %s, index_content = %s, file_type = %s WHERE id = %s', (filesize, icont_u, mime, file_node.file_id))
self.pool.get('ir.attachment').invalidate_cache(cr, uid, ['file_size', 'index_content', 'file_type'], [file_node.file_id], context=context)
file_node.content_length = filesize
file_node.content_type = mime
return True
except Exception, e :
_logger.warning("Cannot save data.", exc_info=True)
# should we really rollback once we have written the actual data?
# at the db case (only), that rollback would be safe
raise except_orm(_('Error at doc write!'), str(e))
def _str2time(cre):
""" Convert a string with time representation (from db) into time (float)
Note: a place to fix if datetime is used in db.
"""
if not cre:
return time.time()
frac = 0.0
if isinstance(cre, basestring) and '.' in cre:
fdot = cre.find('.')
frac = float(cre[fdot:])
cre = cre[:fdot]
return time.mktime(time.strptime(cre,'%Y-%m-%d %H:%M:%S')) + frac
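# Example of the conversion above (illustrative input): for the db string
# '2014-06-01 12:30:00.25' the fractional part '.25' is split off first, the
# remainder is parsed with '%Y-%m-%d %H:%M:%S', and the result is
# mktime(...) + 0.25, i.e. seconds since the epoch as a float; a missing value
# (False/None) falls back to time.time().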
def get_node_context(cr, uid, context):
return node_context(cr, uid, context)
#
# An object that represents a URI
# path: the uri of the object
# content: the Content it belongs to (_print.pdf)
# type: content or collection
# content: object = res.partner
# collection: object = directory, object2 = res.partner
# file: object = ir.attachment
# root: if we are at the first directory of a resource
#
class node_context(object):
""" This is the root node, representing access to some particular context
A context is a set of persistent data, which may influence the structure
of the nodes. All other transient information during a data query should
be passed down with function arguments.
"""
cached_roots = {}
node_file_class = None
def __init__(self, cr, uid, context=None):
self.dbname = cr.dbname
self.uid = uid
self.context = context
if context is None:
context = {}
context['uid'] = uid
self._dirobj = openerp.registry(cr.dbname).get('document.directory')
self.node_file_class = node_file
self.extra_ctx = {} # Extra keys for context, that do _not_ trigger inequality
assert self._dirobj
self._dirobj._prepare_context(cr, uid, self, context=context)
self.rootdir = False #self._dirobj._get_root_directory(cr,uid,context)
def __eq__(self, other):
if not type(other) == node_context:
return False
if self.dbname != other.dbname:
return False
if self.uid != other.uid:
return False
if self.context != other.context:
return False
if self.rootdir != other.rootdir:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def get(self, name, default=None):
return self.context.get(name, default)
def get_uri(self, cr, uri):
""" Although this fn passes back to doc.dir, it is needed since
it is a potential caching point.
"""
(ndir, duri) = self._dirobj._locate_child(cr, self.uid, self.rootdir, uri, None, self)
while duri:
ndir = ndir.child(cr, duri[0])
if not ndir:
return False
duri = duri[1:]
return ndir
def get_dir_node(self, cr, dbro):
"""Create (or locate) a node for a directory
@param dbro a browse object of document.directory
"""
fullpath = dbro.get_full_path(context=self.context)
klass = dbro.get_node_class(dbro, context=self.context)
return klass(fullpath, None ,self, dbro)
def get_file_node(self, cr, fbro):
""" Create or locate a node for a static file
@param fbro a browse object of an ir.attachment
"""
parent = None
if fbro.parent_id:
parent = self.get_dir_node(cr, fbro.parent_id)
return self.node_file_class(fbro.name, parent, self, fbro)
class node_class(object):
""" this is a superclass for our inodes
It is an API for all code that wants to access the document files.
Nodes have attributes which contain usual file properties
"""
our_type = 'baseclass'
DAV_PROPS = None
DAV_M_NS = None
def __init__(self, path, parent, context):
assert isinstance(context,node_context)
assert (not parent ) or isinstance(parent,node_class)
self.path = path
self.context = context
self.type=self.our_type
self.parent = parent
self.uidperms = 5 # computed permissions for our uid, in unix bits
self.mimetype = 'application/octet-stream'
self.create_date = None
self.write_date = None
self.unixperms = 0660
self.uuser = 'user'
self.ugroup = 'group'
self.content_length = 0
# dynamic context:
self.dctx = {}
if parent:
self.dctx = parent.dctx.copy()
self.displayname = 'Object'
def __eq__(self, other):
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def full_path(self):
""" Return the components of the full path for some
node.
The returned list only contains the names of nodes.
"""
if self.parent:
s = self.parent.full_path()
else:
s = []
if isinstance(self.path,list):
s+=self.path
elif self.path is None:
s.append('')
else:
s.append(self.path)
return s #map(lambda x: '/' +x, s)
def __repr__(self):
return "%s@/%s" % (self.our_type, '/'.join(self.full_path()))
def children(self, cr, domain=None):
print "node_class.children()"
return [] #stub
def child(self, cr, name, domain=None):
print "node_class.child()"
return None
def get_uri(self, cr, uri):
duri = uri
ndir = self
while duri:
ndir = ndir.child(cr, duri[0])
if not ndir:
return False
duri = duri[1:]
return ndir
def path_get(self):
print "node_class.path_get()"
return False
def get_data(self, cr):
raise TypeError('No data for %s.'% self.type)
def open_data(self, cr, mode):
""" Open a node_descriptor object for this node.
@param mode the mode of open, e.g. 'r', 'w', 'a', like file.open()
This operation may lock the data for this node (and across
other node hierarchies), until the descriptor is close()d. If
the node is locked, subsequent opens (depending on mode) may
immediately fail with an exception (which?).
For this class, there is no data, so no implementation. Each
child class that has data should override this.
"""
raise TypeError('No data for %s.' % self.type)
def get_etag(self, cr):
""" Get a tag, unique per object + modification.
see http://tools.ietf.org/html/rfc2616#section-13.3.3 """
return '"%s-%s"' % (self._get_ttag(cr), self._get_wtag(cr))
def _get_wtag(self, cr):
""" Return the modification time as a unique, compact string """
return str(_str2time(self.write_date)).replace('.','')
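# Taken together (illustrative values): a node whose _get_ttag() returns
# 'dir-42' and whose write date converts to 1401624000.0 gets the ETag
# '"dir-42-14016240000"' -- _get_wtag() strips the dot from the float's
# string representation.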
def _get_ttag(self, cr):
""" Get a unique tag for this type/id of object.
Must be overridden, so that each node is uniquely identified.
"""
print "node_class.get_ttag()",self
raise NotImplementedError("get_ttag stub()")
def get_dav_props(self, cr):
""" If this class has special behaviour for GroupDAV etc, export
its capabilities """
# This fn is placed here rather than WebDAV, because we want the
# baseclass methods to apply to all node subclasses
return self.DAV_PROPS or {}
def match_dav_eprop(self, cr, match, ns, prop):
res = self.get_dav_eprop(cr, ns, prop)
if res == match:
return True
return False
def get_dav_eprop(self, cr, ns, prop):
if not self.DAV_M_NS:
return None
if self.DAV_M_NS.has_key(ns):
prefix = self.DAV_M_NS[ns]
else:
_logger.debug('No namespace: %s ("%s").',ns, prop)
return None
mname = prefix + "_" + prop.replace('-','_')
if not hasattr(self, mname):
return None
try:
m = getattr(self, mname)
r = m(cr)
return r
except AttributeError:
_logger.debug('The property %s is not supported.' % prop, exc_info=True)
return None
def get_dav_resourcetype(self, cr):
""" Get the DAV resource type.
Is here because some nodes may exhibit special behaviour, like
CalDAV/GroupDAV collections
"""
raise NotImplementedError
def move_to(self, cr, ndir_node, new_name=False, fil_obj=None, ndir_obj=None, in_write=False):
""" Move this node to a new parent directory.
@param ndir_node the collection that this node should be moved under
@param new_name a name to rename this node to. If omitted, the old
name is preserved
@param fil_obj, can be None, is the browse object for the file,
if already available.
@param ndir_obj must be the browse object to the new doc.directory
location, where this node should be moved to.
in_write: When called by write(), we shouldn't attempt to write the
object, but instead return the dict of vals (avoid re-entrance).
If false, we should write all data to the object, here, as if the
caller won't do anything after calling move_to()
Return value:
True: the node is moved, the caller can update other values, too.
False: the node is either removed or fully updated, the caller
must discard the fil_obj, not attempt to write any more to it.
dict: values to write back to the object. *May* contain a new id!
Depending on src and target storage, implementations of this function
could do various things.
Should also consider node<->content, dir<->dir moves etc.
Move operations, as instructed from APIs (e.g. request from DAV) could
use this function.
"""
raise NotImplementedError(repr(self))
def create_child(self, cr, path, data=None):
""" Create a regular file under this node
"""
_logger.warning("Attempted to create a file under %r, not possible.", self)
raise IOError(errno.EPERM, "Not allowed to create file(s) here.")
def create_child_collection(self, cr, objname):
""" Create a child collection (directory) under self
"""
_logger.warning("Attempted to create a collection under %r, not possible.", self)
raise IOError(errno.EPERM, "Not allowed to create folder(s) here.")
def rm(self, cr):
raise NotImplementedError(repr(self))
def rmcol(self, cr):
raise NotImplementedError(repr(self))
def get_domain(self, cr, filters):
# TODO Document
return []
def check_perms(self, perms):
""" Check the permissions of the current node.
@param perms either an integers of the bits to check, or
a string with the permission letters
Permissions of nodes are (in a unix way):
1, x : allow descend into dir
2, w : allow write into file, or modification to dir
4, r : allow read of file, or listing of dir contents
8, u : allow remove (unlink)
"""
if isinstance(perms, str):
pe2 = 0
chars = { 'x': 1, 'w': 2, 'r': 4, 'u': 8 }
for c in perms:
pe2 = pe2 | chars[c]
perms = pe2
elif isinstance(perms, int):
if perms < 0 or perms > 15:
raise ValueError("Invalid permission bits.")
else:
raise ValueError("Invalid permission attribute.")
return ((self.uidperms & perms) == perms)
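# Minimal usage sketch (assumed uidperms value): a node with uidperms = 5
# (read + descend) satisfies check_perms('r') and check_perms(1), but not
# check_perms('rw'), since the write bit (2) is missing; full access would be
# check_perms(15).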
class node_database(node_class):
""" A node representing the database directory
"""
our_type = 'database'
def __init__(self, path=None, parent=False, context=None):
if path is None:
path = []
super(node_database,self).__init__(path, parent, context)
self.unixperms = 040750
self.uidperms = 5
def children(self, cr, domain=None):
res = self._child_get(cr, domain=domain) + self._file_get(cr)
return res
def child(self, cr, name, domain=None):
res = self._child_get(cr, name, domain=None)
if res:
return res[0]
res = self._file_get(cr,name)
if res:
return res[0]
return None
def _child_get(self, cr, name=False, domain=None):
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('parent_id','=', False), ('ressource_parent_type_id','=',False)]
if name:
where.append(('name','=',name))
is_allowed = self.check_perms(1)
else:
is_allowed = self.check_perms(5)
if not is_allowed:
raise IOError(errno.EPERM, "Permission into directory denied.")
if domain:
where = where + domain
ids = dirobj.search(cr, uid, where, context=ctx)
res = []
for dirr in dirobj.browse(cr, uid, ids, context=ctx):
klass = dirr.get_node_class(dirr, context=ctx)
res.append(klass(dirr.name, self, self.context,dirr))
return res
def _file_get(self, cr, nodename=False):
res = []
return res
def _get_ttag(self, cr):
return 'db-%s' % cr.dbname
def mkdosname(company_name, default='noname'):
""" convert a string to a dos-like name"""
if not company_name:
return default
badchars = ' !@#$%^`~*()+={}[];:\'"/?.<>'
n = ''
for c in company_name[:8]:
n += (c in badchars and '_') or c
return n
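# Illustrative examples (not part of the original code): the name is truncated
# to 8 characters and every character found in `badchars` becomes an underscore:
#   mkdosname('My Corp!')     -> 'My_Corp_'
#   mkdosname('OpenERP S.A.') -> 'OpenERP_'
#   mkdosname(False)          -> 'noname'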
def _uid2unixperms(perms, has_owner):
""" Convert the uidperms and the owner flag to full unix bits
"""
res = 0
if has_owner:
res |= (perms & 0x07) << 6
res |= (perms & 0x05) << 3
elif perms & 0x02:
res |= (perms & 0x07) << 6
res |= (perms & 0x07) << 3
else:
res |= (perms & 0x07) << 6
res |= (perms & 0x05) << 3
res |= 0x05
return res
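# Illustrative examples (not part of the original code): uidperms expand to
# unix-style owner/group/other bits, e.g. (octal)
#   _uid2unixperms(7, True)  -> 0750   (rwx owner, r-x group)
#   _uid2unixperms(5, True)  -> 0550   (r-x owner and group)
#   _uid2unixperms(5, False) -> 0555   (no owner: r-x for everybody)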
class node_dir(node_database):
our_type = 'collection'
def __init__(self, path, parent, context, dirr, dctx=None):
super(node_dir,self).__init__(path, parent,context)
self.dir_id = dirr and dirr.id or False
#todo: more info from dirr
self.mimetype = 'application/x-directory'
# 'httpd/unix-directory'
self.create_date = dirr and dirr.create_date or False
self.domain = dirr and dirr.domain or []
self.res_model = dirr and dirr.ressource_type_id and dirr.ressource_type_id.model or False
# TODO: the write date should be MAX(file.write)..
self.write_date = dirr and (dirr.write_date or dirr.create_date) or False
self.content_length = 0
try:
self.uuser = (dirr.user_id and dirr.user_id.login) or 'nobody'
except Exception:
self.uuser = 'nobody'
self.ugroup = mkdosname(dirr.company_id and dirr.company_id.name, default='nogroup')
self.uidperms = dirr.get_dir_permissions()
self.unixperms = 040000 | _uid2unixperms(self.uidperms, dirr and dirr.user_id)
if dctx:
self.dctx.update(dctx)
dc2 = self.context.context
dc2.update(self.dctx)
dc2['dir_id'] = self.dir_id
self.displayname = dirr and dirr.name or False
if dirr and dirr.dctx_ids:
for dfld in dirr.dctx_ids:
try:
self.dctx[dfld.field] = safe_eval(dfld.expr,dc2)
                except Exception:
                    _logger.warning("Cannot eval %s.", dfld.expr, exc_info=True)
def __eq__(self, other):
if type(self) != type(other):
return False
if not self.context == other.context:
return False
# Two directory nodes, for the same document.directory, may have a
# different context! (dynamic folders)
if self.dctx != other.dctx:
return False
return self.dir_id == other.dir_id
def get_data(self, cr):
#res = ''
#for child in self.children(cr):
# res += child.get_data(cr)
return None
def _file_get(self, cr, nodename=False):
res = super(node_dir,self)._file_get(cr, nodename)
is_allowed = self.check_perms(nodename and 1 or 5)
if not is_allowed:
raise IOError(errno.EPERM, "Permission into directory denied.")
cntobj = self.context._dirobj.pool.get('document.directory.content')
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('directory_id','=',self.dir_id) ]
ids = cntobj.search(cr, uid, where, context=ctx)
for content in cntobj.browse(cr, uid, ids, context=ctx):
res3 = cntobj._file_get(cr, self, nodename, content)
if res3:
res.extend(res3)
return res
def _child_get(self, cr, name=None, domain=None):
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('parent_id','=',self.dir_id)]
if name:
where.append(('name','=',name))
is_allowed = self.check_perms(1)
else:
is_allowed = self.check_perms(5)
if not is_allowed:
raise IOError(errno.EPERM, "Permission into directory denied.")
if not domain:
domain = []
where2 = where + domain + [('ressource_parent_type_id','=',False)]
ids = dirobj.search(cr, uid, where2, context=ctx)
res = []
for dirr in dirobj.browse(cr, uid, ids, context=ctx):
klass = dirr.get_node_class(dirr, context=ctx)
res.append(klass(dirr.name, self, self.context,dirr))
# Static directories should never return files with res_model/res_id
# because static dirs are /never/ related to a record.
# In fact, files related to some model and parented by the root dir
# (the default), will NOT be accessible in the node system unless
# a resource folder for that model exists (with resource_find_all=True).
# Having resource attachments in a common folder is bad practice,
# because they would be visible to all users, and their names may be
# the same, conflicting.
where += [('res_model', '=', False)]
fil_obj = dirobj.pool.get('ir.attachment')
ids = fil_obj.search(cr, uid, where, context=ctx)
if ids:
for fil in fil_obj.browse(cr, uid, ids, context=ctx):
klass = self.context.node_file_class
res.append(klass(fil.name, self, self.context, fil))
return res
def rmcol(self, cr):
uid = self.context.uid
directory = self.context._dirobj.browse(cr, uid, self.dir_id)
res = False
if not directory:
            raise OSError(2, 'No such file or directory.')
if not self.check_perms('u'):
raise IOError(errno.EPERM,"Permission denied.")
if directory._name == 'document.directory':
if self.children(cr):
raise OSError(39, 'Directory not empty.')
res = self.context._dirobj.unlink(cr, uid, [directory.id])
else:
raise OSError(1, 'Operation is not permitted.')
return res
def create_child_collection(self, cr, objname):
object2 = False
if not self.check_perms(2):
raise IOError(errno.EPERM,"Permission denied.")
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
obj = dirobj.browse(cr, uid, self.dir_id)
if obj and (obj.type == 'ressource') and not object2:
raise OSError(1, 'Operation is not permitted.')
#objname = uri2[-1]
val = {
'name': objname,
'ressource_parent_type_id': obj and obj.ressource_type_id.id or False,
'ressource_id': object2 and object2.id or False,
'parent_id' : obj and obj.id or False
}
return dirobj.create(cr, uid, val)
def create_child(self, cr, path, data=None):
""" API function to create a child file object and node
Return the node_* created
"""
if not self.check_perms(2):
raise IOError(errno.EPERM,"Permission denied.")
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
fil_obj=dirobj.pool.get('ir.attachment')
val = {
'name': path,
'datas_fname': path,
'parent_id': self.dir_id,
# Datas are not set here
}
fil_id = fil_obj.create(cr, uid, val, context=ctx)
fil = fil_obj.browse(cr, uid, fil_id, context=ctx)
fnode = node_file(path, self, self.context, fil)
if data is not None:
fnode.set_data(cr, data, fil)
return fnode
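    # Illustrative usage (not part of the original code): given a node_dir
    # instance `dnode` and an open cursor, a new attachment plus its wrapping
    # node would typically be obtained and read back with:
    #   fnode = dnode.create_child(cr, 'notes.txt', data='hello')
    #   fnode.get_data(cr)    # -> 'hello'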
def _get_ttag(self, cr):
return 'dir-%d' % self.dir_id
def move_to(self, cr, ndir_node, new_name=False, fil_obj=None, ndir_obj=None, in_write=False):
""" Move directory. This operation is simple, since the present node is
only used for static, simple directories.
            Note: it /may/ be called with ndir_node = None, to rename the document root.
"""
if ndir_node and (ndir_node.context != self.context):
raise NotImplementedError("Cannot move directories between contexts.")
        if (not self.check_perms('u')) or (ndir_node and not ndir_node.check_perms('w')):
raise IOError(errno.EPERM,"Permission denied.")
dir_obj = self.context._dirobj
if not fil_obj:
dbro = dir_obj.browse(cr, self.context.uid, self.dir_id, context=self.context.context)
else:
dbro = dir_obj
assert dbro.id == self.dir_id
if not dbro:
raise IndexError("Cannot locate dir %d", self.dir_id)
if (not self.parent) and ndir_node:
if not dbro.parent_id:
raise IOError(errno.EPERM, "Cannot move the root directory!")
self.parent = self.context.get_dir_node(cr, dbro.parent_id)
assert self.parent
if self.parent != ndir_node:
_logger.debug('Cannot move dir %r from %r to %r.', self, self.parent, ndir_node)
raise NotImplementedError('Cannot move dir to another dir.')
ret = {}
if new_name and (new_name != dbro.name):
if ndir_node.child(cr, new_name):
raise IOError(errno.EEXIST, "Destination path already exists.")
ret['name'] = new_name
del dbro
if not in_write:
# We have to update the data ourselves
if ret:
ctx = self.context.context.copy()
ctx['__from_node'] = True
dir_obj.write(cr, self.context.uid, [self.dir_id,], ret, ctx)
ret = True
return ret
class node_res_dir(node_class):
""" A folder containing dynamic folders
        A special sibling to node_dir, which only contains dynamically
        created folders, one for each resource in the foreign model.
All folders should be of type node_res_obj and merely behave like
node_dirs (with limited domain).
"""
our_type = 'collection'
res_obj_class = None
def __init__(self, path, parent, context, dirr, dctx=None ):
super(node_res_dir,self).__init__(path, parent, context)
self.dir_id = dirr.id
#todo: more info from dirr
self.mimetype = 'application/x-directory'
# 'httpd/unix-directory'
self.create_date = dirr.create_date
# TODO: the write date should be MAX(file.write)..
self.write_date = dirr.write_date or dirr.create_date
self.content_length = 0
try:
self.uuser = (dirr.user_id and dirr.user_id.login) or 'nobody'
except Exception:
self.uuser = 'nobody'
self.ugroup = mkdosname(dirr.company_id and dirr.company_id.name, default='nogroup')
self.uidperms = dirr.get_dir_permissions()
self.unixperms = 040000 | _uid2unixperms(self.uidperms, dirr and dirr.user_id)
self.res_model = dirr.ressource_type_id and dirr.ressource_type_id.model or False
self.resm_id = dirr.ressource_id
self.res_find_all = dirr.resource_find_all
self.namefield = dirr.resource_field.name or 'name'
self.displayname = dirr.name
# Important: the domain is evaluated using the *parent* dctx!
self.domain = dirr.domain
self.ressource_tree = dirr.ressource_tree
# and then, we add our own vars in the dctx:
if dctx:
self.dctx.update(dctx)
# and then, we prepare a dctx dict, for deferred evaluation:
self.dctx_dict = {}
for dfld in dirr.dctx_ids:
self.dctx_dict[dfld.field] = dfld.expr
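    # Illustrative note (not part of the original code): the expressions above
    # stay unevaluated here and are only computed per child in node_res_obj,
    # where 'this' is bound to the browse record; e.g. a dctx line such as
    #   field = 'active_id', expr = 'this.id'
    # yields dctx['active_id'] == <record id> for every generated folder.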
def __eq__(self, other):
if type(self) != type(other):
return False
if not self.context == other.context:
return False
# Two nodes, for the same document.directory, may have a
# different context! (dynamic folders)
if self.dctx != other.dctx:
return False
return self.dir_id == other.dir_id
def children(self, cr, domain=None):
return self._child_get(cr, domain=domain)
def child(self, cr, name, domain=None):
res = self._child_get(cr, name, domain=domain)
if res:
return res[0]
return None
def _child_get(self, cr, name=None, domain=None):
""" return virtual children of resource, based on the
foreign object.
Note that many objects use NULL for a name, so we should
better call the name_search(),name_get() set of methods
"""
if self.res_model not in self.context._dirobj.pool:
return []
obj = self.context._dirobj.pool[self.res_model]
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
ctx.update(self.context.extra_ctx)
where = []
if self.domain:
app = safe_eval(self.domain, ctx)
if not app:
pass
elif isinstance(app, list):
where.extend(app)
elif isinstance(app, tuple):
where.append(app)
else:
raise RuntimeError("Incorrect domain expr: %s." % self.domain)
if self.resm_id:
where.append(('id','=',self.resm_id))
if name:
# The =like character will match underscores against any characters
# including the special ones that couldn't exist in a FTP/DAV request
where.append((self.namefield,'=like',name.replace('\\','\\\\')))
is_allowed = self.check_perms(1)
else:
is_allowed = self.check_perms(5)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
# print "Where clause for %s" % self.res_model, where
if self.ressource_tree:
object2 = False
if self.resm_id:
object2 = dirobj.pool[self.res_model].browse(cr, uid, self.resm_id) or False
if obj._parent_name in obj.fields_get(cr, uid):
where.append((obj._parent_name,'=',object2 and object2.id or False))
resids = obj.search(cr, uid, where, context=ctx)
res = []
for bo in obj.browse(cr, uid, resids, context=ctx):
if not bo:
continue
res_name = getattr(bo, self.namefield)
if not res_name:
continue
            # Yes! we can do no better than skip nameless records.
# Escape the name for characters not supported in filenames
res_name = res_name.replace('/','_') # any other weird char?
if name and (res_name != ustr(name)):
# we have matched _ to any character, but we only meant to match
# the special ones.
# Eg. 'a_c' will find 'abc', 'a/c', 'a_c', may only
# return 'a/c' and 'a_c'
continue
res.append(self.res_obj_class(res_name, self.dir_id, self, self.context, self.res_model, bo))
return res
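    # Illustrative note (not part of the original code): because '=like' treats
    # '_' as a single-character wildcard, a lookup for 'a_c' may fetch records
    # named 'abc', 'a/c' and 'a_c'; the post-filtering above (comparing the
    # escaped res_name against the requested name) keeps only 'a/c' and 'a_c'.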
def _get_ttag(self, cr):
return 'rdir-%d' % self.dir_id
class node_res_obj(node_class):
""" A dynamically created folder.
        A special sibling to node_dir, which only contains dynamically
        created folders, one for each resource in the foreign model.
All folders should be of type node_res_obj and merely behave like
node_dirs (with limited domain).
"""
our_type = 'collection'
def __init__(self, path, dir_id, parent, context, res_model, res_bo, res_id=None):
super(node_res_obj,self).__init__(path, parent,context)
assert parent
#todo: more info from dirr
self.dir_id = dir_id
self.mimetype = 'application/x-directory'
# 'httpd/unix-directory'
self.create_date = parent.create_date
# TODO: the write date should be MAX(file.write)..
self.write_date = parent.write_date
self.content_length = 0
self.uidperms = parent.uidperms & 15
self.unixperms = 040000 | _uid2unixperms(self.uidperms, True)
self.uuser = parent.uuser
self.ugroup = parent.ugroup
self.res_model = res_model
self.domain = parent.domain
self.displayname = path
self.dctx_dict = parent.dctx_dict
if isinstance(parent, node_res_dir):
self.res_find_all = parent.res_find_all
else:
self.res_find_all = False
if res_bo:
self.res_id = res_bo.id
dc2 = self.context.context.copy()
dc2.update(self.dctx)
dc2['res_model'] = res_model
dc2['res_id'] = res_bo.id
dc2['this'] = res_bo
for fld,expr in self.dctx_dict.items():
try:
self.dctx[fld] = safe_eval(expr, dc2)
                except Exception:
                    _logger.warning("Cannot eval %s for %s.", expr, fld, exc_info=True)
else:
self.res_id = res_id
def __eq__(self, other):
if type(self) != type(other):
return False
if not self.context == other.context:
return False
if not self.res_model == other.res_model:
return False
if not self.res_id == other.res_id:
return False
if self.domain != other.domain:
return False
if self.res_find_all != other.res_find_all:
return False
if self.dctx != other.dctx:
return False
return self.dir_id == other.dir_id
def children(self, cr, domain=None):
return self._child_get(cr, domain=domain) + self._file_get(cr)
def child(self, cr, name, domain=None):
res = self._child_get(cr, name, domain=domain)
if res:
return res[0]
res = self._file_get(cr, name)
if res:
return res[0]
return None
def _file_get(self, cr, nodename=False):
res = []
is_allowed = self.check_perms((nodename and 1) or 5)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
cntobj = self.context._dirobj.pool.get('document.directory.content')
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('directory_id','=',self.dir_id) ]
#if self.domain:
# where.extend(self.domain)
# print "res_obj file_get clause", where
ids = cntobj.search(cr, uid, where, context=ctx)
for content in cntobj.browse(cr, uid, ids, context=ctx):
res3 = cntobj._file_get(cr, self, nodename, content, context=ctx)
if res3:
res.extend(res3)
return res
def get_dav_props_DEPR(self, cr):
# Deprecated! (but document_ics must be cleaned, first)
res = {}
cntobj = self.context._dirobj.pool.get('document.directory.content')
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('directory_id','=',self.dir_id) ]
ids = cntobj.search(cr, uid, where, context=ctx)
for content in cntobj.browse(cr, uid, ids, context=ctx):
if content.extension == '.ics': # FIXME: call the content class!
res['http://groupdav.org/'] = ('resourcetype',)
return res
def get_dav_eprop_DEPR(self, cr, ns, prop):
# Deprecated!
if ns != 'http://groupdav.org/' or prop != 'resourcetype':
_logger.warning("Who asks for %s:%s?" % (ns, prop))
return None
cntobj = self.context._dirobj.pool.get('document.directory.content')
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('directory_id','=',self.dir_id) ]
ids = cntobj.search(cr,uid,where,context=ctx)
for content in cntobj.browse(cr, uid, ids, context=ctx):
# TODO: remove relic of GroupDAV
if content.extension == '.ics': # FIXME: call the content class!
return ('vevent-collection','http://groupdav.org/')
return None
def _child_get(self, cr, name=None, domain=None):
dirobj = self.context._dirobj
is_allowed = self.check_perms((name and 1) or 5)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
directory = dirobj.browse(cr, uid, self.dir_id)
obj = dirobj.pool[self.res_model]
where = []
res = []
if name:
where.append(('name','=',name))
# Directory Structure display in tree structure
if self.res_id and directory.ressource_tree:
where1 = []
if name:
where1.append(('name','=like',name.replace('\\','\\\\')))
if obj._parent_name in obj.fields_get(cr, uid):
where1.append((obj._parent_name, '=', self.res_id))
namefield = directory.resource_field.name or 'name'
resids = obj.search(cr, uid, where1, context=ctx)
for bo in obj.browse(cr, uid, resids, context=ctx):
if not bo:
continue
res_name = getattr(bo, namefield)
if not res_name:
continue
res_name = res_name.replace('/', '_')
if name and (res_name != ustr(name)):
continue
# TODO Revise
klass = directory.get_node_class(directory, dynamic=True, context=ctx)
rnode = klass(res_name, dir_id=self.dir_id, parent=self, context=self.context,
res_model=self.res_model, res_bo=bo)
rnode.res_find_all = self.res_find_all
res.append(rnode)
where2 = where + [('parent_id','=',self.dir_id) ]
ids = dirobj.search(cr, uid, where2, context=ctx)
bo = obj.browse(cr, uid, self.res_id, context=ctx)
for dirr in dirobj.browse(cr, uid, ids, context=ctx):
if name and (name != dirr.name):
continue
if dirr.type == 'directory':
klass = dirr.get_node_class(dirr, dynamic=True, context=ctx)
res.append(klass(dirr.name, dirr.id, self, self.context, self.res_model, res_bo = bo, res_id = self.res_id))
elif dirr.type == 'ressource':
# child resources can be controlled by properly set dctx
klass = dirr.get_node_class(dirr, context=ctx)
res.append(klass(dirr.name,self,self.context, dirr, {'active_id': self.res_id})) # bo?
fil_obj = dirobj.pool.get('ir.attachment')
if self.res_find_all:
where2 = where
where3 = where2 + [('res_model', '=', self.res_model), ('res_id','=',self.res_id)]
# print "where clause for dir_obj", where3
ids = fil_obj.search(cr, uid, where3, context=ctx)
if ids:
for fil in fil_obj.browse(cr, uid, ids, context=ctx):
klass = self.context.node_file_class
res.append(klass(fil.name, self, self.context, fil))
# Get Child Ressource Directories
if directory.ressource_type_id and directory.ressource_type_id.id:
where4 = where + [('ressource_parent_type_id','=',directory.ressource_type_id.id)]
where5 = where4 + ['|', ('ressource_id','=',0), ('ressource_id','=',self.res_id)]
dirids = dirobj.search(cr,uid, where5)
for dirr in dirobj.browse(cr, uid, dirids, context=ctx):
if dirr.type == 'directory' and not dirr.parent_id:
klass = dirr.get_node_class(dirr, dynamic=True, context=ctx)
rnode = klass(dirr.name, dirr.id, self, self.context, self.res_model, res_bo = bo, res_id = self.res_id)
rnode.res_find_all = dirr.resource_find_all
res.append(rnode)
if dirr.type == 'ressource':
klass = dirr.get_node_class(dirr, context=ctx)
rnode = klass(dirr.name, self, self.context, dirr, {'active_id': self.res_id})
rnode.res_find_all = dirr.resource_find_all
res.append(rnode)
return res
def create_child_collection(self, cr, objname):
dirobj = self.context._dirobj
is_allowed = self.check_perms(2)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
res_obj = dirobj.pool[self.res_model]
object2 = res_obj.browse(cr, uid, self.res_id) or False
obj = dirobj.browse(cr, uid, self.dir_id)
if obj and (obj.type == 'ressource') and not object2:
raise OSError(1, 'Operation is not permitted.')
val = {
'name': objname,
'ressource_parent_type_id': obj and obj.ressource_type_id.id or False,
'ressource_id': object2 and object2.id or False,
'parent_id' : False,
'resource_find_all': False,
}
        if (obj and (obj.type in ('directory',))) or not object2:
val['parent_id'] = obj and obj.id or False
return dirobj.create(cr, uid, val)
def create_child(self, cr, path, data=None):
""" API function to create a child file object and node
Return the node_* created
"""
is_allowed = self.check_perms(2)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
fil_obj=dirobj.pool.get('ir.attachment')
val = {
'name': path,
'datas_fname': path,
'res_model': self.res_model,
'res_id': self.res_id,
# Datas are not set here
}
if not self.res_find_all:
val['parent_id'] = self.dir_id
fil_id = fil_obj.create(cr, uid, val, context=ctx)
fil = fil_obj.browse(cr, uid, fil_id, context=ctx)
klass = self.context.node_file_class
fnode = klass(path, self, self.context, fil)
if data is not None:
fnode.set_data(cr, data, fil)
return fnode
def _get_ttag(self, cr):
return 'rodir-%d-%d' % (self.dir_id, self.res_id)
node_res_dir.res_obj_class = node_res_obj
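# Note (added comment): the assignment above is a deliberate late binding:
# node_res_dir is defined before node_res_obj, so its res_obj_class attribute
# can only be filled in once both classes exist.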
class node_file(node_class):
our_type = 'file'
def __init__(self, path, parent, context, fil):
super(node_file,self).__init__(path, parent,context)
self.file_id = fil.id
#todo: more info from ir_attachment
if fil.file_type and '/' in fil.file_type:
self.mimetype = str(fil.file_type)
self.create_date = fil.create_date
self.write_date = fil.write_date or fil.create_date
self.content_length = fil.file_size
self.displayname = fil.name
self.uidperms = 14
if parent:
if not parent.check_perms('x'):
self.uidperms = 0
elif not parent.check_perms('w'):
self.uidperms = 4
try:
self.uuser = (fil.user_id and fil.user_id.login) or 'nobody'
except Exception:
self.uuser = 'nobody'
self.ugroup = mkdosname(fil.company_id and fil.company_id.name, default='nogroup')
def __eq__(self, other):
if type(self) != type(other):
return False
if not self.context == other.context:
return False
if self.dctx != other.dctx:
return False
return self.file_id == other.file_id
def open_data(self, cr, mode):
if not self.check_perms(4):
raise IOError(errno.EPERM, "Permission denied.")
stobj = self.context._dirobj.pool.get('document.storage')
return stobj.get_file(cr, self.context.uid, None, self, mode=mode, context=self.context.context)
def rm(self, cr):
uid = self.context.uid
if not self.check_perms(8):
raise IOError(errno.EPERM, "Permission denied.")
document_obj = self.context._dirobj.pool.get('ir.attachment')
if self.type in ('collection','database'):
return False
document = document_obj.browse(cr, uid, self.file_id, context=self.context.context)
res = False
if document and document._name == 'ir.attachment':
res = document_obj.unlink(cr, uid, [document.id])
return res
def fix_ppath(self, cr, fbro):
"""Sometimes we may init this w/o path, parent.
This function fills the missing path from the file browse object
Note: this may be an expensive operation, do on demand. However,
once caching is in, we might want to do that at init time and keep
this object anyway
"""
if self.path or self.parent:
return
assert fbro
uid = self.context.uid
dirpath = []
if fbro.parent_id:
dirobj = self.context._dirobj.pool.get('document.directory')
dirpath = dirobj.get_full_path(cr, uid, fbro.parent_id.id, context=self.context.context)
if fbro.datas_fname:
dirpath.append(fbro.datas_fname)
else:
dirpath.append(fbro.name)
if len(dirpath)>1:
self.path = dirpath
else:
self.path = dirpath[0]
def get_data(self, cr, fil_obj=None):
""" Retrieve the data for some file.
fil_obj may optionally be specified, and should be a browse object
for the file. This is useful when the caller has already initiated
the browse object. """
if not self.check_perms(4):
raise IOError(errno.EPERM, "Permission denied.")
stobj = self.context._dirobj.pool.get('document.storage')
return stobj.get_data(cr, self.context.uid, None, self,self.context.context, fil_obj)
def get_data_len(self, cr, fil_obj=None):
bin_size = self.context.context.get('bin_size', False)
if bin_size and not self.content_length:
self.content_length = fil_obj.db_datas
return self.content_length
def set_data(self, cr, data, fil_obj=None):
""" Store data at some file.
fil_obj may optionally be specified, and should be a browse object
for the file. This is useful when the caller has already initiated
the browse object. """
if not self.check_perms(2):
raise IOError(errno.EPERM, "Permission denied.")
stobj = self.context._dirobj.pool.get('document.storage')
return stobj.set_data(cr, self.context.uid, None, self, data, self.context.context, fil_obj)
def _get_ttag(self, cr):
return 'file-%d' % self.file_id
def move_to(self, cr, ndir_node, new_name=False, fil_obj=None, ndir_obj=None, in_write=False):
if ndir_node and ndir_node.context != self.context:
raise NotImplementedError("Cannot move files between contexts.")
        if (not self.check_perms(8)) or (ndir_node and not ndir_node.check_perms(2)):
raise IOError(errno.EPERM, "Permission denied.")
doc_obj = self.context._dirobj.pool.get('ir.attachment')
if not fil_obj:
dbro = doc_obj.browse(cr, self.context.uid, self.file_id, context=self.context.context)
else:
dbro = fil_obj
assert dbro.id == self.file_id, "%s != %s for %r." % (dbro.id, self.file_id, self)
if not dbro:
raise IndexError("Cannot locate doc %d.", self.file_id)
if (not self.parent):
# there *must* be a parent node for this one
self.parent = self.context.get_dir_node(cr, dbro.parent_id)
assert self.parent
ret = {}
if ndir_node and self.parent != ndir_node:
if not (isinstance(self.parent, node_dir) and isinstance(ndir_node, node_dir)):
_logger.debug('Cannot move file %r from %r to %r.', self, self.parent, ndir_node)
raise NotImplementedError('Cannot move files between dynamic folders.')
if not ndir_obj:
ndir_obj = self.context._dirobj.browse(cr, self.context.uid, \
ndir_node.dir_id, context=self.context.context)
assert ndir_obj.id == ndir_node.dir_id
r2 = { 'parent_id': ndir_obj.id }
ret.update(r2)
if new_name and (new_name != dbro.name):
if len(ret):
raise NotImplementedError("Cannot rename and move.") # TODO
r2 = { 'name': new_name, 'datas_fname': new_name }
ret.update(r2)
del dbro
if not in_write:
# We have to update the data ourselves
if ret:
ctx = self.context.context.copy()
ctx['__from_node'] = True
doc_obj.write(cr, self.context.uid, [self.file_id,], ret, ctx )
ret = True
return ret
class node_content(node_class):
our_type = 'content'
def __init__(self, path, parent, context, cnt, dctx=None, act_id=None):
super(node_content,self).__init__(path, parent,context)
self.cnt_id = cnt.id
self.create_date = False
self.write_date = False
self.content_length = False
self.unixperms = 0640
if parent:
self.uidperms = parent.uidperms & 14
self.uuser = parent.uuser
self.ugroup = parent.ugroup
self.extension = cnt.extension
self.report_id = cnt.report_id and cnt.report_id.id
#self.mimetype = cnt.extension.
self.displayname = path
if dctx:
self.dctx.update(dctx)
self.act_id = act_id
def fill_fields(self, cr, dctx=None):
""" Try to read the object and fill missing fields, like mimetype,
dates etc.
This function must be different from the constructor, because
it uses the db cursor.
"""
cr.execute('SELECT DISTINCT mimetype FROM document_directory_content_type WHERE active AND code = %s;',
(self.extension,))
res = cr.fetchall()
if res and res[0][0]:
self.mimetype = str(res[0][0])
def get_data(self, cr, fil_obj=None):
cntobj = self.context._dirobj.pool.get('document.directory.content')
if not self.check_perms(4):
raise IOError(errno.EPERM, "Permission denied.")
ctx = self.context.context.copy()
ctx.update(self.dctx)
data = cntobj.process_read(cr, self.context.uid, self, ctx)
if data:
self.content_length = len(data)
return data
def open_data(self, cr, mode):
if mode.endswith('b'):
mode = mode[:-1]
if mode in ('r', 'w'):
cperms = mode[:1]
elif mode in ('r+', 'w+'):
cperms = 'rw'
else:
raise IOError(errno.EINVAL, "Cannot open at mode %s." % mode)
if not self.check_perms(cperms):
raise IOError(errno.EPERM, "Permission denied.")
ctx = self.context.context.copy()
ctx.update(self.dctx)
return nodefd_content(self, cr, mode, ctx)
def get_data_len(self, cr, fil_obj=None):
        # FIXME: here we actually generate the content twice!
        # We should cache the generated content, but it is not advisable
        # to keep it in memory until we have a cache expiration logic.
if not self.content_length:
self.get_data(cr,fil_obj)
return self.content_length
def set_data(self, cr, data, fil_obj=None):
cntobj = self.context._dirobj.pool.get('document.directory.content')
if not self.check_perms(2):
raise IOError(errno.EPERM, "Permission denied.")
ctx = self.context.context.copy()
ctx.update(self.dctx)
return cntobj.process_write(cr, self.context.uid, self, data, ctx)
def _get_ttag(self, cr):
return 'cnt-%d%s' % (self.cnt_id,(self.act_id and ('-' + str(self.act_id))) or '')
def get_dav_resourcetype(self, cr):
return ''
class node_descriptor(object):
"""A file-like interface to the data contents of a node.
This class is NOT a node, but an /open descriptor/ for some
node. It can hold references to a cursor or a file object,
because the life of a node_descriptor will be the open period
of the data.
It should also take care of locking, with any native mechanism
or using the db.
For the implementation, it would be OK just to wrap around file,
StringIO or similar class. The node_descriptor is only needed to
provide the link to the parent /node/ object.
"""
def __init__(self, parent):
assert isinstance(parent, node_class)
self.name = parent.displayname
self.__parent = parent
def _get_parent(self):
return self.__parent
def open(self, **kwargs):
raise NotImplementedError
def close(self):
raise NotImplementedError
def read(self, size=None):
raise NotImplementedError
def seek(self, offset, whence=None):
raise NotImplementedError
def tell(self):
raise NotImplementedError
def write(self, str):
raise NotImplementedError
def size(self):
raise NotImplementedError
def __len__(self):
return self.size()
def __nonzero__(self):
""" Ensure that a node_descriptor will never equal False
Since we do define __len__ and __iter__ for us, we must avoid
being regarded as non-true objects.
"""
return True
def next(self, str):
raise NotImplementedError
class nodefd_content(StringIO, node_descriptor):
""" A descriptor to content nodes
"""
def __init__(self, parent, cr, mode, ctx):
node_descriptor.__init__(self, parent)
self._context=ctx
self._size = 0L
if mode in ('r', 'r+'):
cntobj = parent.context._dirobj.pool.get('document.directory.content')
data = cntobj.process_read(cr, parent.context.uid, parent, ctx)
if data:
self._size = len(data)
parent.content_length = len(data)
StringIO.__init__(self, data)
elif mode in ('w', 'w+'):
StringIO.__init__(self, None)
# at write, we start at 0 (= overwrite), but have the original
# data available, in case of a seek()
elif mode == 'a':
StringIO.__init__(self, None)
else:
_logger.error("Incorrect mode %s is specified.", mode)
raise IOError(errno.EINVAL, "Invalid file mode.")
self.mode = mode
def size(self):
return self._size
def close(self):
# we now open a *separate* cursor, to update the data.
# FIXME: this may be improved, for concurrency handling
if self.mode == 'r':
StringIO.close(self)
return
par = self._get_parent()
uid = par.context.uid
cr = openerp.registry(par.context.dbname).cursor()
try:
if self.mode in ('w', 'w+', 'r+'):
data = self.getvalue()
cntobj = par.context._dirobj.pool.get('document.directory.content')
cntobj.process_write(cr, uid, par, data, par.context.context)
elif self.mode == 'a':
raise NotImplementedError
cr.commit()
except Exception:
_logger.exception('Cannot update db content #%d for close.', par.cnt_id)
raise
finally:
cr.close()
StringIO.close(self)
class nodefd_static(StringIO, node_descriptor):
""" A descriptor to nodes with static data.
"""
def __init__(self, parent, cr, mode, ctx=None):
node_descriptor.__init__(self, parent)
self._context=ctx
self._size = 0L
if mode in ('r', 'r+'):
data = parent.get_data(cr)
if data:
self._size = len(data)
parent.content_length = len(data)
StringIO.__init__(self, data)
elif mode in ('w', 'w+'):
StringIO.__init__(self, None)
# at write, we start at 0 (= overwrite), but have the original
# data available, in case of a seek()
elif mode == 'a':
StringIO.__init__(self, None)
else:
_logger.error("Incorrect mode %s is specified.", mode)
raise IOError(errno.EINVAL, "Invalid file mode.")
self.mode = mode
def size(self):
return self._size
def close(self):
# we now open a *separate* cursor, to update the data.
# FIXME: this may be improved, for concurrency handling
if self.mode == 'r':
StringIO.close(self)
return
par = self._get_parent()
# uid = par.context.uid
cr = openerp.registry(par.context.dbname).cursor()
try:
if self.mode in ('w', 'w+', 'r+'):
data = self.getvalue()
par.set_data(cr, data)
elif self.mode == 'a':
raise NotImplementedError
cr.commit()
except Exception:
            _logger.exception('Cannot update data of %r at close.', par)
raise
finally:
cr.close()
StringIO.close(self)
class nodefd_db(StringIO, node_descriptor):
""" A descriptor to db data
"""
def __init__(self, parent, ira_browse, mode):
node_descriptor.__init__(self, parent)
self._size = 0L
if mode.endswith('b'):
mode = mode[:-1]
if mode in ('r', 'r+'):
data = ira_browse.datas
if data:
data = data.decode('base64')
self._size = len(data)
StringIO.__init__(self, data)
elif mode in ('w', 'w+'):
StringIO.__init__(self, None)
# at write, we start at 0 (= overwrite), but have the original
# data available, in case of a seek()
elif mode == 'a':
StringIO.__init__(self, None)
else:
_logger.error("Incorrect mode %s is specified.", mode)
raise IOError(errno.EINVAL, "Invalid file mode.")
self.mode = mode
def size(self):
return self._size
def close(self):
# we now open a *separate* cursor, to update the data.
# FIXME: this may be improved, for concurrency handling
par = self._get_parent()
# uid = par.context.uid
registry = openerp.modules.registry.RegistryManager.get(par.context.dbname)
with registry.cursor() as cr:
data = self.getvalue().encode('base64')
if self.mode in ('w', 'w+', 'r+'):
registry.get('ir.attachment').write(cr, 1, par.file_id, {'datas': data})
cr.commit()
StringIO.close(self)
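# Illustrative traversal sketch (not part of the original module; the way the
# root node is obtained is an assumption): once a root node for the current
# session context is available, the tree is walked with children()/child() and
# file contents are read through open_data() descriptors:
#   for child in root.children(cr):
#       print child.displayname, child.our_type
#   fnode = root.child(cr, 'Documents').child(cr, 'notes.txt')
#   fd = fnode.open_data(cr, 'r')
#   data = fd.read()
#   fd.close()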
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
rwl/PyCIM
|
CIM15/CDPSM/Connectivity/IEC61970/Wires/__init__.py
|
1
|
3466
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""An extension to the Core and Topology package that models information on the electrical characteristics of Transmission and Distribution networks. This package is used by network applications such as State Estimation, Load Flow and Optimal Power Flow.
"""
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Fuse import Fuse
from CIM15.CDPSM.Connectivity.IEC61970.Wires.EnergyConsumer import EnergyConsumer
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Switch import Switch
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Disconnector import Disconnector
from CIM15.CDPSM.Connectivity.IEC61970.Wires.ACLineSegment import ACLineSegment
from CIM15.CDPSM.Connectivity.IEC61970.Wires.SynchronousMachine import SynchronousMachine
from CIM15.CDPSM.Connectivity.IEC61970.Wires.BusbarSection import BusbarSection
from CIM15.CDPSM.Connectivity.IEC61970.Wires.LoadBreakSwitch import LoadBreakSwitch
from CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerTank import TransformerTank
from CIM15.CDPSM.Connectivity.IEC61970.Wires.GroundDisconnector import GroundDisconnector
from CIM15.CDPSM.Connectivity.IEC61970.Wires.PowerTransformerEnd import PowerTransformerEnd
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Junction import Junction
from CIM15.CDPSM.Connectivity.IEC61970.Wires.SeriesCompensator import SeriesCompensator
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Breaker import Breaker
from CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerTankEnd import TransformerTankEnd
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Sectionaliser import Sectionaliser
from CIM15.CDPSM.Connectivity.IEC61970.Wires.DCLineSegment import DCLineSegment
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Line import Line
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Conductor import Conductor
from CIM15.CDPSM.Connectivity.IEC61970.Wires.PowerTransformer import PowerTransformer
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Ground import Ground
from CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerEnd import TransformerEnd
from CIM15.CDPSM.Connectivity.IEC61970.Wires.ShuntCompensator import ShuntCompensator
from CIM15.CDPSM.Connectivity.IEC61970.Wires.EnergySource import EnergySource
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Jumper import Jumper
nsURI = "http://iec.ch/TC57/2010/CIM-schema-cim15?profile=http://iec.ch/TC57/2011/iec61968-13/CDPSM/Connectivity#Wires"
nsPrefix = "cimWires"
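# Illustrative usage (not part of the original module): the re-exports above let
# client code import the CDPSM Wires classes directly from this package, e.g.
#   from CIM15.CDPSM.Connectivity.IEC61970.Wires import ACLineSegment, Breaker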
|
mit
|
alexproca/askbot-devel
|
askbot/migrations/0099_auto__del_field_thread_accepted_answer__del_field_anonymousanswer_ques.py
|
10
|
33856
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'PostRevision', fields ['question', 'revision']
db.delete_unique('askbot_postrevision', ['question_id', 'revision'])
# Removing unique constraint on 'PostRevision', fields ['answer', 'revision']
db.delete_unique('askbot_postrevision', ['answer_id', 'revision'])
# Deleting field 'Thread.accepted_answer'
db.delete_column('askbot_thread', 'accepted_answer_id')
# Deleting field 'AnonymousAnswer.question'
db.delete_column('askbot_anonymousanswer', 'question_id')
# Changing field 'AnonymousAnswer.question_post'
db.alter_column('askbot_anonymousanswer', 'question_post_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['askbot.Post']))
# Deleting field 'QuestionView.question'
db.delete_column('askbot_questionview', 'question_id')
# Changing field 'QuestionView.question_post'
db.alter_column('askbot_questionview', 'question_post_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['askbot.Post']))
# Deleting field 'PostRevision.question'
db.delete_column('askbot_postrevision', 'question_id')
# Deleting field 'PostRevision.answer'
db.delete_column('askbot_postrevision', 'answer_id')
# Adding unique constraint on 'PostRevision', fields ['post', 'revision']
db.create_unique('askbot_postrevision', ['post_id', 'revision'])
def backwards(self, orm):
# Removing unique constraint on 'PostRevision', fields ['post', 'revision']
db.delete_unique('askbot_postrevision', ['post_id', 'revision'])
# Adding field 'Thread.accepted_answer'
db.add_column('askbot_thread', 'accepted_answer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['askbot.Answer'], null=True, blank=True), keep_default=False)
# User chose to not deal with backwards NULL issues for 'AnonymousAnswer.question'
raise RuntimeError("Cannot reverse this migration. 'AnonymousAnswer.question' and its values cannot be restored.")
# Changing field 'AnonymousAnswer.question_post'
db.alter_column('askbot_anonymousanswer', 'question_post_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['askbot.Post']))
# User chose to not deal with backwards NULL issues for 'QuestionView.question'
raise RuntimeError("Cannot reverse this migration. 'QuestionView.question' and its values cannot be restored.")
# Changing field 'QuestionView.question_post'
db.alter_column('askbot_questionview', 'question_post_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['askbot.Post']))
# Adding field 'PostRevision.question'
db.add_column('askbot_postrevision', 'question', self.gf('django.db.models.fields.related.ForeignKey')(related_name='revisions', null=True, to=orm['askbot.Question'], blank=True), keep_default=False)
# Adding field 'PostRevision.answer'
db.add_column('askbot_postrevision', 'answer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='revisions', null=True, to=orm['askbot.Answer'], blank=True), keep_default=False)
# Adding unique constraint on 'PostRevision', fields ['answer', 'revision']
db.create_unique('askbot_postrevision', ['answer_id', 'revision'])
# Adding unique constraint on 'PostRevision', fields ['question', 'revision']
db.create_unique('askbot_postrevision', ['question_id', 'revision'])
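    # Illustrative note (not part of the generated migration): with South
    # installed, this schema change would typically be applied or reverted with
    #   python manage.py migrate askbot 0099
    #   python manage.py migrate askbot 0098   # runs backwards(), which raises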
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Post']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.post': {
'Meta': {'object_name': 'Post'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_posts'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'post_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'self_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Answer']"}),
'self_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Comment']"}),
'self_question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Question']"}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
# "Post-processing" - added manually to add support for URL mapping
'old_question_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
'old_answer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
'old_comment_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('post', 'revision'),)", 'object_name': 'PostRevision'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Post']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_answer_post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'answer_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'unused_favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('user', 'voted_post'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'voted_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'post_votes'", 'to': "orm['askbot.Post']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
|
gpl-3.0
|
w1ll1am23/home-assistant
|
tests/components/ring/test_sensor.py
|
23
|
1636
|
"""The tests for the Ring sensor platform."""
from .common import setup_platform
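# Guard for the Wi-Fi signal assertions below: they are skipped by default,
# on the assumption that the Wi-Fi sensor entities are not enabled in this
# test setup. Set to True only if those entities are available.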
WIFI_ENABLED = False
async def test_sensor(hass, requests_mock):
"""Test the Ring sensors."""
await setup_platform(hass, "sensor")
front_battery_state = hass.states.get("sensor.front_battery")
assert front_battery_state is not None
assert front_battery_state.state == "80"
front_door_battery_state = hass.states.get("sensor.front_door_battery")
assert front_door_battery_state is not None
assert front_door_battery_state.state == "100"
downstairs_volume_state = hass.states.get("sensor.downstairs_volume")
assert downstairs_volume_state is not None
assert downstairs_volume_state.state == "2"
front_door_last_activity_state = hass.states.get("sensor.front_door_last_activity")
assert front_door_last_activity_state is not None
downstairs_wifi_signal_strength_state = hass.states.get(
"sensor.downstairs_wifi_signal_strength"
)
if not WIFI_ENABLED:
return
assert downstairs_wifi_signal_strength_state is not None
assert downstairs_wifi_signal_strength_state.state == "-39"
front_door_wifi_signal_category_state = hass.states.get(
"sensor.front_door_wifi_signal_category"
)
assert front_door_wifi_signal_category_state is not None
assert front_door_wifi_signal_category_state.state == "good"
front_door_wifi_signal_strength_state = hass.states.get(
"sensor.front_door_wifi_signal_strength"
)
assert front_door_wifi_signal_strength_state is not None
assert front_door_wifi_signal_strength_state.state == "-58"
|
apache-2.0
|
coolbombom/CouchPotato
|
library/sqlalchemy/orm/__init__.py
|
11
|
52904
|
# sqlalchemy/orm/__init__.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer
# [email protected]
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Functional constructs for ORM configuration.
See the SQLAlchemy object relational tutorial and mapper configuration
documentation for an overview of how this module is used.
"""
from sqlalchemy.orm import exc
# 'exceptions' is referenced by contains_eager()'s keyword-validation error
# path below but is not otherwise defined in this module; alias the core
# exception module so that path raises ArgumentError instead of NameError.
from sqlalchemy import exc as exceptions
from sqlalchemy.orm.mapper import (
Mapper,
_mapper_registry,
class_mapper,
)
from sqlalchemy.orm.interfaces import (
EXT_CONTINUE,
EXT_STOP,
ExtensionOption,
InstrumentationManager,
MapperExtension,
PropComparator,
SessionExtension,
AttributeExtension,
)
from sqlalchemy.orm.util import (
AliasedClass as aliased,
Validator,
join,
object_mapper,
outerjoin,
polymorphic_union,
with_parent,
)
from sqlalchemy.orm.properties import (
ColumnProperty,
ComparableProperty,
CompositeProperty,
RelationshipProperty,
PropertyLoader,
SynonymProperty,
)
from sqlalchemy.orm import mapper as mapperlib
from sqlalchemy.orm.mapper import reconstructor, validates
from sqlalchemy.orm import strategies
from sqlalchemy.orm.query import AliasOption, Query
from sqlalchemy.sql import util as sql_util
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.session import object_session, sessionmaker, \
make_transient
from sqlalchemy.orm.scoping import ScopedSession
from sqlalchemy import util as sa_util
__all__ = (
'EXT_CONTINUE',
'EXT_STOP',
'InstrumentationManager',
'MapperExtension',
'AttributeExtension',
'Validator',
'PropComparator',
'Query',
'Session',
'aliased',
'backref',
'class_mapper',
'clear_mappers',
'column_property',
'comparable_property',
'compile_mappers',
'composite',
'contains_alias',
'contains_eager',
'create_session',
'defer',
'deferred',
'dynamic_loader',
'eagerload',
'eagerload_all',
'extension',
'immediateload',
'join',
'joinedload',
'joinedload_all',
'lazyload',
'mapper',
'make_transient',
'noload',
'object_mapper',
'object_session',
'outerjoin',
'polymorphic_union',
'reconstructor',
'relationship',
'relation',
'scoped_session',
'sessionmaker',
'subqueryload',
'subqueryload_all',
'synonym',
'undefer',
'undefer_group',
'validates'
)
def scoped_session(session_factory, scopefunc=None):
"""Provides thread-local or scoped management of :class:`.Session` objects.
This is a front-end function to
:class:`.ScopedSession`.
:param session_factory: a callable function that produces
:class:`Session` instances, such as :func:`sessionmaker`.
:param scopefunc: Optional "scope" function which would be
passed to the :class:`.ScopedRegistry`. If None, the
:class:`.ThreadLocalRegistry` is used by default.
:returns: an :class:`.ScopedSession` instance
Usage::
Session = scoped_session(sessionmaker(autoflush=True))
To instantiate a Session object which is part of the scoped context,
instantiate normally::
session = Session()
Most session methods are available as classmethods from the scoped
session::
Session.commit()
Session.close()
"""
return ScopedSession(session_factory, scopefunc=scopefunc)
def create_session(bind=None, **kwargs):
"""Create a new :class:`.Session`
with no automation enabled by default.
This function is used primarily for testing. The usual
route to :class:`.Session` creation is via its constructor
or the :func:`.sessionmaker` function.
:param bind: optional, a single Connectable to use for all
database access in the created
:class:`~sqlalchemy.orm.session.Session`.
:param \*\*kwargs: optional, passed through to the
:class:`Session` constructor.
:returns: an :class:`~sqlalchemy.orm.session.Session` instance
The defaults of create_session() are the opposite of that of
:func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are
False, ``autocommit`` is True. In this sense the session acts
more like the "classic" SQLAlchemy 0.3 session with these.
Usage::
>>> from sqlalchemy.orm import create_session
>>> session = create_session()
It is recommended to use :func:`sessionmaker` instead of
create_session().
"""
kwargs.setdefault('autoflush', False)
kwargs.setdefault('autocommit', True)
kwargs.setdefault('expire_on_commit', False)
return Session(bind=bind, **kwargs)
def relationship(argument, secondary=None, **kwargs):
"""Provide a relationship of a primary Mapper to a secondary Mapper.
.. note:: :func:`relationship` is historically known as
:func:`relation` prior to version 0.6.
This corresponds to a parent-child or associative table relationship. The
constructed class is an instance of :class:`RelationshipProperty`.
A typical :func:`relationship`::
mapper(Parent, properties={
'children': relationship(Children)
})
:param argument:
a class or :class:`Mapper` instance, representing the target of
the relationship.
:param secondary:
for a many-to-many relationship, specifies the intermediary
table. The *secondary* keyword argument should generally only
be used for a table that is not otherwise expressed in any class
mapping. In particular, using the Association Object Pattern is
generally mutually exclusive with the use of the *secondary*
keyword argument.
:param backref:
indicates the string name of a property to be placed on the related
mapper's class that will handle this relationship in the other
direction. The other property will be created automatically
when the mappers are configured. Can also be passed as a
:func:`backref` object to control the configuration of the
new relationship.
:param back_populates:
Takes a string name and has the same meaning as ``backref``,
except the complementing property is **not** created automatically,
and instead must be configured explicitly on the other mapper. The
complementing property should also indicate ``back_populates``
to this relationship to ensure proper functioning.
:param cascade:
a comma-separated list of cascade rules which determines how
Session operations should be "cascaded" from parent to child.
This defaults to ``False``, which means the default cascade
should be used. The default value is ``"save-update, merge"``.
Available cascades are:
* ``save-update`` - cascade the :meth:`.Session.add`
operation. This cascade applies both to future and
past calls to :meth:`~sqlalchemy.orm.session.Session.add`,
meaning new items added to a collection or scalar relationship
get placed into the same session as that of the parent, and
also applies to items which have been removed from this
relationship but are still part of unflushed history.
* ``merge`` - cascade the :meth:`~sqlalchemy.orm.session.Session.merge`
operation
* ``expunge`` - cascade the :meth:`.Session.expunge`
operation
* ``delete`` - cascade the :meth:`.Session.delete`
operation
* ``delete-orphan`` - if an item of the child's type with no
parent is detected, mark it for deletion. Note that this
option prevents a pending item of the child's class from being
persisted without a parent present.
* ``refresh-expire`` - cascade the :meth:`.Session.expire`
and :meth:`~sqlalchemy.orm.session.Session.refresh` operations
* ``all`` - shorthand for "save-update,merge, refresh-expire,
expunge, delete"
:param cascade_backrefs=True:
a boolean value indicating if the ``save-update`` cascade should
operate along a backref event. When set to ``False`` on a
one-to-many relationship that has a many-to-one backref, assigning
a persistent object to the many-to-one attribute on a transient object
will not add the transient to the session. Similarly, when
set to ``False`` on a many-to-one relationship that has a one-to-many
backref, appending a persistent object to the one-to-many collection
on a transient object will not add the transient to the session.
``cascade_backrefs`` is new in 0.6.5.
:param collection_class:
a class or callable that returns a new list-holding object. will
be used in place of a plain list for storing elements.
Behavior of this attribute is described in detail at
:ref:`custom_collections`.
:param comparator_factory:
a class which extends :class:`RelationshipProperty.Comparator` which
provides custom SQL clause generation for comparison operations.
:param doc:
docstring which will be applied to the resulting descriptor.
:param extension:
an :class:`AttributeExtension` instance, or list of extensions,
which will be prepended to the list of attribute listeners for
the resulting descriptor placed on the class. These listeners
will receive append and set events before the operation
proceeds, and may be used to halt (via exception throw) or
change the value used in the operation.
:param foreign_keys:
a list of columns which are to be used as "foreign key" columns.
Normally, :func:`relationship` uses the :class:`.ForeignKey`
and :class:`.ForeignKeyConstraint` objects present within the
mapped or secondary :class:`.Table` to determine the "foreign" side of
the join condition. This is used to construct SQL clauses in order
to load objects, as well as to "synchronize" values from
primary key columns to referencing foreign key columns.
The ``foreign_keys`` parameter overrides the notion of what's
"foreign" in the table metadata, allowing the specification
of a list of :class:`.Column` objects that should be considered
part of the foreign key.
There are only two use cases for ``foreign_keys`` - one, when it is not
convenient for :class:`.Table` metadata to contain its own foreign key
metadata (which should be almost never, unless reflecting a large amount of
tables from a MySQL MyISAM schema, or a schema that doesn't actually
have foreign keys on it). The other is for extremely
rare and exotic composite foreign key setups where some columns
should artificially not be considered as foreign.
:param innerjoin=False:
when ``True``, joined eager loads will use an inner join to join
against related tables instead of an outer join. The purpose
of this option is strictly one of performance, as inner joins
generally perform better than outer joins. This flag can
be set to ``True`` when the relationship references an object
via many-to-one using local foreign keys that are not nullable,
or when the reference is one-to-one or a collection that is
guaranteed to have one or at least one entry.
:param join_depth:
when non-``None``, an integer value indicating how many levels
deep "eager" loaders should join on a self-referring or cyclical
relationship. The number counts how many times the same Mapper
shall be present in the loading condition along a particular join
branch. When left at its default of ``None``, eager loaders
will stop chaining when they encounter the same target mapper
which is already higher up in the chain. This option applies
both to joined- and subquery- eager loaders.
:param lazy='select': specifies
how the related items should be loaded. Default value is
``select``. Values include:
* ``select`` - items should be loaded lazily when the property is first
accessed, using a separate SELECT statement, or identity map
fetch for simple many-to-one references.
* ``immediate`` - items should be loaded as the parents are loaded,
using a separate SELECT statement, or identity map fetch for
simple many-to-one references. (new as of 0.6.5)
* ``joined`` - items should be loaded "eagerly" in the same query as
that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
the join is "outer" or not is determined by the ``innerjoin``
parameter.
* ``subquery`` - items should be loaded "eagerly" within the same
query as that of the parent, using a second SQL statement
which issues a JOIN to a subquery of the original
statement.
* ``noload`` - no loading should occur at any time. This is to
support "write-only" attributes, or attributes which are
populated in some manner specific to the application.
* ``dynamic`` - the attribute will return a pre-configured
:class:`~sqlalchemy.orm.query.Query` object for all read
operations, onto which further filtering operations can be
applied before iterating the results. The dynamic
collection supports a limited set of mutation operations,
allowing ``append()`` and ``remove()``. Changes to the
collection will not be visible until flushed
to the database, where it is then refetched upon iteration.
* True - a synonym for 'select'
* False - a synonym for 'joined'
* None - a synonym for 'noload'
Detailed discussion of loader strategies is at :ref:`loading_toplevel`.
:param load_on_pending=False:
Indicates loading behavior for transient or pending parent objects.
When set to ``True``, causes the lazy-loader to
issue a query for a parent object that is not persistent, meaning it has
never been flushed. This may take effect for a pending object when
autoflush is disabled, or for a transient object that has been
"attached" to a :class:`.Session` but is not part of its pending
collection. Attachment of transient objects to the session without
moving to the "pending" state is not a supported behavior at this time.
Note that the load of related objects on a pending or transient object
also does not trigger any attribute change events - no user-defined
events will be emitted for these attributes, and if and when the
object is ultimately flushed, only the user-specific foreign key
attributes will be part of the modified state.
The load_on_pending flag does not improve behavior
when the ORM is used normally - object references should be constructed
at the object level, not at the foreign key level, so that they
are present in an ordinary way before flush() proceeds. This flag
is not intended for general use.
New in 0.6.5.
:param order_by:
indicates the ordering that should be applied when loading these
items.
:param passive_deletes=False:
Indicates loading behavior during delete operations.
A value of True indicates that unloaded child items should not
be loaded during a delete operation on the parent. Normally,
when a parent item is deleted, all child items are loaded so
that they can either be marked as deleted, or have their
foreign key to the parent set to NULL. Marking this flag as
True usually implies an ON DELETE <CASCADE|SET NULL> rule is in
place which will handle updating/deleting child rows on the
database side.
Additionally, setting the flag to the string value 'all' will
disable the "nulling out" of the child foreign keys, when there
is no delete or delete-orphan cascade enabled. This is
typically used when a triggering or error raise scenario is in
place on the database side. Note that the foreign key
attributes on in-session child objects will not be changed
after a flush occurs so this is a very special use-case
setting.
:param passive_updates=True:
Indicates loading and INSERT/UPDATE/DELETE behavior when the
source of a foreign key value changes (i.e. an "on update"
cascade), which are typically the primary key columns of the
source row.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will
handle propagation of an UPDATE from a source column to
dependent rows. Note that with databases which enforce
referential integrity (i.e. PostgreSQL, MySQL with InnoDB tables),
ON UPDATE CASCADE is required for this operation. The
relationship() will update the value of the attribute on related
items which are locally present in the session during a flush.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The relationship() will issue the
appropriate UPDATE statements to the database in response to the
change of a referenced key, and items locally present in the
session during a flush will also be refreshed.
This flag should probably be set to False if primary key changes
are expected and the database in use doesn't support CASCADE
(i.e. SQLite, MySQL MyISAM tables).
Also see the passive_updates flag on ``mapper()``.
A future SQLAlchemy release will provide a "detect" feature for
this flag.
:param post_update:
this indicates that the relationship should be handled by a
second UPDATE statement after an INSERT or before a
DELETE. Currently, it also will issue an UPDATE after the
instance was UPDATEd as well, although this technically should
be improved. This flag is used to handle saving bi-directional
dependencies between two individual rows (i.e. each row
references the other), where it would otherwise be impossible to
INSERT or DELETE both rows fully since one row exists before the
other. Use this flag when a particular mapping arrangement will
incur two rows that are dependent on each other, such as a table
that has a one-to-many relationship to a set of child rows, and
also has a column that references a single child row within that
list (i.e. both tables contain a foreign key to each other). If
a ``flush()`` operation returns an error that a "cyclical
dependency" was detected, this is a cue that you might want to
use ``post_update`` to "break" the cycle.
:param primaryjoin:
a ColumnElement (i.e. WHERE criterion) that will be used as the primary
join of this child object against the parent object, or in a
many-to-many relationship the join of the primary object to the
association table. By default, this value is computed based on the
foreign key relationships of the parent and child tables (or association
table).
:param remote_side:
used for self-referential relationships, indicates the column or
list of columns that form the "remote side" of the relationship.
:param secondaryjoin:
a ColumnElement (i.e. WHERE criterion) that will be used as the join of
an association table to the child object. By default, this value is
computed based on the foreign key relationships of the association and
child tables.
:param single_parent=(True|False):
when True, installs a validator which will prevent objects
from being associated with more than one parent at a time.
This is used for many-to-one or many-to-many relationships that
should be treated either as one-to-one or one-to-many. Its
usage is optional unless delete-orphan cascade is also
set on this relationship(), in which case it's required (new in 0.5.2).
:param uselist=(True|False):
a boolean that indicates if this property should be loaded as a
list or a scalar. In most cases, this value is determined
automatically by ``relationship()``, based on the type and direction
of the relationship - one to many forms a list, many to one
forms a scalar, many to many is a list. If a scalar is desired
where normally a list would be present, such as a bi-directional
one-to-one relationship, set uselist to False.
:param viewonly=False:
when set to True, the relationship is used only for loading objects
within the relationship, and has no effect on the unit-of-work
flush process. Relationships with viewonly can specify any kind of
join conditions to provide additional views of related objects
onto a parent object. Note that the functionality of a viewonly
relationship has its limits - complicated join conditions may
not compile into eager or lazy loaders properly. If this is the
case, use an alternative method.
"""
return RelationshipProperty(argument, secondary=secondary, **kwargs)
def relation(*arg, **kw):
"""A synonym for :func:`relationship`."""
return relationship(*arg, **kw)
def dynamic_loader(argument, secondary=None, primaryjoin=None,
secondaryjoin=None, foreign_keys=None, backref=None,
post_update=False, cascade=False, remote_side=None,
enable_typechecks=True, passive_deletes=False, doc=None,
order_by=None, comparator_factory=None, query_class=None):
"""Construct a dynamically-loading mapper property.
This property is similar to :func:`relationship`, except read
operations return an active :class:`Query` object which reads from
the database when accessed. Items may be appended to the
attribute via ``append()``, or removed via ``remove()``; changes
will be persisted to the database during a :meth:`Session.flush`.
However, no other Python list or collection mutation operations
are available.
A subset of arguments available to :func:`relationship` are available
here.
:param argument:
a class or :class:`Mapper` instance, representing the target of
the relationship.
:param secondary:
for a many-to-many relationship, specifies the intermediary
table. The *secondary* keyword argument should generally only
be used for a table that is not otherwise expressed in any class
mapping. In particular, using the Association Object Pattern is
generally mutually exclusive with the use of the *secondary*
keyword argument.
:param query_class:
Optional, a custom Query subclass to be used as the basis for
dynamic collection.
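    A short illustrative sketch (the ``User``/``Post`` classes and their
    tables are assumptions, not defined in this module)::
        mapper(User, users_table, properties={
            'posts': dynamic_loader(Post, order_by=posts_table.c.id)
        })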
"""
from sqlalchemy.orm.dynamic import DynaLoader
return RelationshipProperty(
argument, secondary=secondary, primaryjoin=primaryjoin,
secondaryjoin=secondaryjoin, foreign_keys=foreign_keys,
backref=backref,
post_update=post_update, cascade=cascade, remote_side=remote_side,
enable_typechecks=enable_typechecks, passive_deletes=passive_deletes,
order_by=order_by, comparator_factory=comparator_factory,doc=doc,
strategy_class=DynaLoader, query_class=query_class)
def column_property(*args, **kwargs):
"""Provide a column-level property for use with a Mapper.
Column-based properties can normally be applied to the mapper's
``properties`` dictionary using the ``schema.Column`` element directly.
Use this function when the given column is not directly present within the
mapper's selectable; examples include SQL expressions, functions, and
scalar SELECT queries.
Columns that aren't present in the mapper's selectable won't be persisted
by the mapper and are effectively "read-only" attributes.
:param \*cols:
list of Column objects to be mapped.
:param comparator_factory: a class which extends
:class:`.ColumnProperty.Comparator` which provides custom SQL clause
generation for comparison operations.
:param group:
a group name for this property when marked as deferred.
:param deferred:
when True, the column property is "deferred", meaning that
it does not load immediately, and is instead loaded when the
attribute is first accessed on an instance. See also
:func:`~sqlalchemy.orm.deferred`.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param extension:
an :class:`~sqlalchemy.orm.interfaces.AttributeExtension` instance,
or list of extensions, which will be prepended to the list of
attribute listeners for the resulting descriptor placed on the class.
These listeners will receive append and set events before the
operation proceeds, and may be used to halt (via exception throw)
or change the value used in the operation.
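    An illustrative example (the mapped class, table, and column names are
    assumptions)::
        mapper(User, users_table, properties={
            'fullname': column_property(
                users_table.c.firstname + " " + users_table.c.lastname)
        })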
"""
return ColumnProperty(*args, **kwargs)
def composite(class_, *cols, **kwargs):
"""Return a composite column-based property for use with a Mapper.
See the mapping documentation section :ref:`mapper_composite` for a full
usage example.
:param class\_:
The "composite type" class.
:param \*cols:
List of Column objects to be mapped.
:param group:
A group name for this property when marked as deferred.
:param deferred:
When True, the column property is "deferred", meaning that it does not
load immediately, and is instead loaded when the attribute is first
accessed on an instance. See also :func:`~sqlalchemy.orm.deferred`.
:param comparator_factory: a class which extends
:class:`.CompositeProperty.Comparator` which provides custom SQL clause
generation for comparison operations.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param extension:
an :class:`~sqlalchemy.orm.interfaces.AttributeExtension` instance,
or list of extensions, which will be prepended to the list of
attribute listeners for the resulting descriptor placed on the class.
These listeners will receive append and set events before the
operation proceeds, and may be used to halt (via exception throw)
or change the value used in the operation.
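    An illustrative sketch, assuming a plain ``Point(x, y)`` value class and
    a ``vertices`` table (neither is defined here)::
        mapper(Vertex, vertices_table, properties={
            'start': composite(Point, vertices_table.c.x1, vertices_table.c.y1),
            'end': composite(Point, vertices_table.c.x2, vertices_table.c.y2)
        })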
"""
return CompositeProperty(class_, *cols, **kwargs)
def backref(name, **kwargs):
"""Create a back reference with explicit arguments, which are the same
arguments one can send to :func:`relationship`.
Used with the `backref` keyword argument to :func:`relationship` in
place of a string argument.
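    For example (class and table names are illustrative only)::
        mapper(Parent, parent_table, properties={
            'children': relationship(Child,
                            backref=backref('parent', lazy='joined'))
        })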
"""
return (name, kwargs)
def deferred(*columns, **kwargs):
"""Return a :class:`DeferredColumnProperty`, which indicates this
object attributes should only be loaded from its corresponding
table column when first accessed.
Used with the `properties` dictionary sent to :func:`mapper`.
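    For example (the ``Book`` class and ``book_table`` are assumptions)::
        mapper(Book, book_table, properties={
            'photo': deferred(book_table.c.photo)
        })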
"""
return ColumnProperty(deferred=True, *columns, **kwargs)
def mapper(class_, local_table=None, *args, **params):
"""Return a new :class:`~.Mapper` object.
:param class\_: The class to be mapped.
:param local_table: The table to which the class is mapped, or None if
this mapper inherits from another mapper using concrete table
inheritance.
:param always_refresh: If True, all query operations for this mapped
class will overwrite all data within object instances that already
exist within the session, erasing any in-memory changes with
whatever information was loaded from the database. Usage of this
flag is highly discouraged; as an alternative, see the method
`populate_existing()` on :class:`~sqlalchemy.orm.query.Query`.
:param allow_null_pks: This flag is deprecated - use allow_partial_pks
instead, which defaults to True.
:param allow_partial_pks: Defaults to True. Indicates that a
composite primary key with some NULL values should be considered as
possibly existing within the database. This affects whether a
mapper will assign an incoming row to an existing identity, as well
as if session.merge() will check the database first for a
particular primary key value. A "partial primary key" can occur if
one has mapped to an OUTER JOIN, for example.
:param batch: Indicates that save operations of multiple entities
can be batched together for efficiency. setting to False indicates
that an instance will be fully saved before saving the next
instance, which includes inserting/updating all table rows
corresponding to the entity as well as calling all
:class:`MapperExtension` methods corresponding to the save
operation.
:param column_prefix: A string which will be prepended to the `key`
name of all Columns when creating column-based properties from the
given Table. Does not affect explicitly specified column-based
properties
:param concrete: If True, indicates this mapper should use concrete
table inheritance with its parent mapper.
:param exclude_properties: A list or set of string column names to
be excluded from mapping. As of SQLAlchemy 0.6.4, this collection
may also include :class:`.Column` objects. Columns named or present
in this list will not be automatically mapped. Note that neither
this option nor include_properties will allow one to circumvent plain
Python inheritance - if mapped class ``B`` inherits from mapped
class ``A``, no combination of includes or excludes will allow ``B``
to have fewer properties than its superclass, ``A``.
:param extension: A :class:`.MapperExtension` instance or
list of :class:`~sqlalchemy.orm.interfaces.MapperExtension`
instances which will be applied to all operations by this
:class:`~sqlalchemy.orm.mapper.Mapper`.
:param include_properties: An inclusive list or set of string column
names to map. As of SQLAlchemy 0.6.4, this collection may also
include :class:`.Column` objects in order to disambiguate between
same-named columns in a selectable (such as a
:func:`~.expression.join()`). If this list is not ``None``, columns
present in the mapped table but not named or present in this list
will not be automatically mapped. See also "exclude_properties".
:param inherits: Another :class:`~sqlalchemy.orm.Mapper` for which
this :class:`~sqlalchemy.orm.Mapper` will have an inheritance
relationship with.
:param inherit_condition: For joined table inheritance, a SQL
expression (constructed
:class:`~sqlalchemy.expression.sql.ClauseElement`) which will
define how the two tables are joined; defaults to a natural join
between the two tables.
:param inherit_foreign_keys: When inherit_condition is used and the
condition contains no ForeignKey columns, specify the "foreign"
columns of the join condition in this list; otherwise leave as None.
:param non_primary: Construct a :class:`Mapper` that will define only
the selection of instances, not their persistence. Any number of
non_primary mappers may be created for a particular class.
:param order_by: A single :class:`Column` or list of :class:`Column`
objects for which selection operations should use as the default
ordering for entities. Defaults to the OID/ROWID of the table if
any, or the first primary key column of the table.
:param passive_updates: Indicates UPDATE behavior of foreign keys
when a primary key changes on a joined-table inheritance or other
joined table mapping.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will handle
propagation of an UPDATE from a source column to dependent rows.
Note that with databases which enforce referential integrity (i.e.
PostgreSQL, MySQL with InnoDB tables), ON UPDATE CASCADE is
required for this operation. The relationship() will update the
value of the attribute on related items which are locally present
in the session during a flush.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The relationship() will issue the
appropriate UPDATE statements to the database in response to the
change of a referenced key, and items locally present in the
session during a flush will also be refreshed.
This flag should probably be set to False if primary key changes
are expected and the database in use doesn't support CASCADE (i.e.
SQLite, MySQL MyISAM tables).
Also see the passive_updates flag on :func:`relationship()`.
A future SQLAlchemy release will provide a "detect" feature for
this flag.
:param polymorphic_on: Used with mappers in an inheritance
relationship, a ``Column`` which will identify the class/mapper
combination to be used with a particular row. Requires the
``polymorphic_identity`` value to be set for all mappers in the
inheritance hierarchy. The column specified by ``polymorphic_on``
is usually a column that resides directly within the base mapper's
mapped table; alternatively, it may be a column that is only
present within the <selectable> portion of the ``with_polymorphic``
argument.
:param polymorphic_identity: A value which will be stored in the
Column denoted by polymorphic_on, corresponding to the *class
identity* of this mapper.
:param properties: A dictionary mapping the string names of object
attributes to ``MapperProperty`` instances, which define the
persistence behavior of that attribute. Note that the columns in
the mapped table are automatically converted into
``ColumnProperty`` instances based on the `key` property of each
``Column`` (although they can be overridden using this dictionary).
:param primary_key: A list of ``Column`` objects which define the
*primary key* to be used against this mapper's selectable unit.
This is normally simply the primary key of the `local_table`, but
can be overridden here.
:param version_id_col: A ``Column`` which must have an integer type
that will be used to keep a running *version id* of mapped entities
in the database. this is used during save operations to ensure that
no other thread or process has updated the instance during the
lifetime of the entity, else a :class:`StaleDataError` exception is
thrown.
:param version_id_generator: A callable which defines the algorithm
used to generate new version ids. Defaults to an integer
generator. Can be replaced with one that generates timestamps,
uuids, etc. e.g.::
import uuid
mapper(Cls, table,
version_id_col=table.c.version_uuid,
version_id_generator=lambda version:uuid.uuid4().hex
)
The callable receives the current version identifier as its
single argument.
:param with_polymorphic: A tuple in the form ``(<classes>,
<selectable>)`` indicating the default style of "polymorphic"
loading, that is, which tables are queried at once. <classes> is
any single or list of mappers and/or classes indicating the
inherited classes that should be loaded at once. The special value
``'*'`` may be used to indicate all descending classes should be
loaded immediately. The second tuple argument <selectable>
indicates a selectable that will be used to query for multiple
classes. Normally, it is left as None, in which case this mapper
will form an outer join from the base mapper's table to that of
all desired sub-mappers. When specified, it provides the
selectable to be used for polymorphic loading. When
with_polymorphic includes mappers which load from a "concrete"
inheriting table, the <selectable> argument is required, since it
usually requires more complex UNION queries.
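    A minimal usage example (the ``User`` class, ``users_table`` and the
    related ``Address`` mapping are assumptions)::
        mapper(User, users_table, properties={
            'addresses': relationship(Address, backref='user')
        })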
"""
return Mapper(class_, local_table, *args, **params)
def synonym(name, map_column=False, descriptor=None,
comparator_factory=None, doc=None):
"""Set up `name` as a synonym to another mapped property.
Used with the ``properties`` dictionary sent to
:func:`~sqlalchemy.orm.mapper`.
Any existing attributes on the class which map the key name sent
to the ``properties`` dictionary will be used by the synonym to provide
instance-attribute behavior (that is, any Python property object, provided
by the ``property`` builtin or providing a ``__get__()``, ``__set__()``
and ``__del__()`` method). If no name exists for the key, the
``synonym()`` creates a default getter/setter object automatically and
applies it to the class.
`name` refers to the name of the existing mapped property, which can be
any other ``MapperProperty`` including column-based properties and
relationships.
If `map_column` is ``True``, an additional ``ColumnProperty`` is created
on the mapper automatically, using the synonym's name as the keyname of
the property, and the keyname of this ``synonym()`` as the name of the
column to map. For example, if a table has a column named ``status``::
class MyClass(object):
def _get_status(self):
return self._status
def _set_status(self, value):
self._status = value
status = property(_get_status, _set_status)
mapper(MyClass, sometable, properties={
"status":synonym("_status", map_column=True)
})
The column named ``status`` will be mapped to the attribute named
``_status``, and the ``status`` attribute on ``MyClass`` will be used to
proxy access to the column-based attribute.
"""
return SynonymProperty(name, map_column=map_column,
descriptor=descriptor,
comparator_factory=comparator_factory,
doc=doc)
def comparable_property(comparator_factory, descriptor=None):
"""Provides a method of applying a :class:`.PropComparator`
to any Python descriptor attribute.
Allows a regular Python @property (descriptor) to be used in Queries and
SQL constructs like a managed attribute. comparable_property wraps a
descriptor with a proxy that directs operator overrides such as ==
(__eq__) to the supplied comparator but proxies everything else through to
the original descriptor::
from sqlalchemy.orm import mapper, comparable_property
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.sql import func
class MyClass(object):
@property
def myprop(self):
return 'foo'
class MyComparator(PropComparator):
def __eq__(self, other):
return func.lower(other) == foo
mapper(MyClass, mytable, properties={
'myprop': comparable_property(MyComparator)})
Used with the ``properties`` dictionary sent to
:func:`~sqlalchemy.orm.mapper`.
Note that :func:`comparable_property` is usually not needed for basic
needs. The recipe at :mod:`.derived_attributes` offers a simpler
pure-Python method of achieving a similar result using class-bound
attributes with SQLAlchemy expression constructs.
:param comparator_factory:
A PropComparator subclass or factory that defines operator behavior
for this property.
:param descriptor:
Optional when used in a ``properties={}`` declaration. The Python
descriptor or property to layer comparison behavior on top of.
The like-named descriptor will be automatically retrieved from the
mapped class if left blank in a ``properties`` declaration.
"""
return ComparableProperty(comparator_factory, descriptor)
def compile_mappers():
"""Compile all mappers that have been defined.
This is equivalent to calling ``compile()`` on any individual mapper.
"""
for m in list(_mapper_registry):
m.compile()
def clear_mappers():
"""Remove all mappers from all classes.
This function removes all instrumentation from classes and disposes
of their associated mappers. Once called, the classes are unmapped
and can be later re-mapped with new mappers.
:func:`.clear_mappers` is *not* for normal use, as there is literally no
valid usage for it outside of very specific testing scenarios. Normally,
mappers are permanent structural components of user-defined classes, and
are never discarded independently of their class. If a mapped class itself
is garbage collected, its mapper is automatically disposed of as well. As
such, :func:`.clear_mappers` is only for usage in test suites that re-use
the same classes with different mappings, which is itself an extremely rare
use case - the only such use case is in fact SQLAlchemy's own test suite,
and possibly the test suites of other ORM extension libraries which
intend to test various combinations of mapper construction upon a fixed
set of classes.
"""
mapperlib._COMPILE_MUTEX.acquire()
try:
while _mapper_registry:
try:
# can't even reliably call list(weakdict) in jython
mapper, b = _mapper_registry.popitem()
mapper.dispose()
except KeyError:
pass
finally:
mapperlib._COMPILE_MUTEX.release()
def extension(ext):
"""Return a ``MapperOption`` that will insert the given
``MapperExtension`` to the beginning of the list of extensions
that will be called in the context of the ``Query``.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
"""
return ExtensionOption(ext)
@sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated')
def joinedload(*keys, **kw):
"""Return a ``MapperOption`` that will convert the property of the given
name into a joined eager load.
.. note:: This function is known as :func:`eagerload` in all versions
of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4
series. :func:`eagerload` will remain available for the foreseeable
future in order to enable cross-compatibility.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
examples::
# joined-load the "orders" colleciton on "User"
query(User).options(joinedload(User.orders))
# joined-load the "keywords" collection on each "Item",
# but not the "items" collection on "Order" - those
# remain lazily loaded.
query(Order).options(joinedload(Order.items, Item.keywords))
# to joined-load across both, use joinedload_all()
query(Order).options(joinedload_all(Order.items, Item.keywords))
:func:`joinedload` also accepts a keyword argument `innerjoin=True` which
indicates using an inner join instead of an outer::
query(Order).options(joinedload(Order.user, innerjoin=True))
Note that the join created by :func:`joinedload` is aliased such that no
other aspects of the query will affect what it loads. To use joined eager
loading with a join that is constructed manually using
:meth:`~sqlalchemy.orm.query.Query.join` or :func:`~sqlalchemy.orm.join`,
see :func:`contains_eager`.
See also: :func:`subqueryload`, :func:`lazyload`
"""
innerjoin = kw.pop('innerjoin', None)
if innerjoin is not None:
return (
strategies.EagerLazyOption(keys, lazy='joined'),
strategies.EagerJoinOption(keys, innerjoin)
)
else:
return strategies.EagerLazyOption(keys, lazy='joined')
@sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated')
def joinedload_all(*keys, **kw):
"""Return a ``MapperOption`` that will convert all properties along the
given dot-separated path into a joined eager load.
.. note:: This function is known as :func:`eagerload_all` in all versions
of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4
series. :func:`eagerload_all` will remain available for the
foreseeable future in order to enable cross-compatibility.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
For example::
query.options(joinedload_all('orders.items.keywords'))...
will set all of 'orders', 'orders.items', and 'orders.items.keywords' to
load in one joined eager load.
Individual descriptors are accepted as arguments as well::
query.options(joinedload_all(User.orders, Order.items, Item.keywords))
The keyword arguments accept a flag `innerjoin=True|False` which will
override the value of the `innerjoin` flag specified on the
relationship().
See also: :func:`subqueryload_all`, :func:`lazyload`
"""
innerjoin = kw.pop('innerjoin', None)
if innerjoin is not None:
return (
strategies.EagerLazyOption(keys, lazy='joined', chained=True),
strategies.EagerJoinOption(keys, innerjoin, chained=True)
)
else:
return strategies.EagerLazyOption(keys, lazy='joined', chained=True)
def eagerload(*args, **kwargs):
"""A synonym for :func:`joinedload()`."""
return joinedload(*args, **kwargs)
def eagerload_all(*args, **kwargs):
"""A synonym for :func:`joinedload_all()`"""
return joinedload_all(*args, **kwargs)
def subqueryload(*keys):
"""Return a ``MapperOption`` that will convert the property
of the given name into a subquery eager load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
examples::
# subquery-load the "orders" colleciton on "User"
query(User).options(subqueryload(User.orders))
# subquery-load the "keywords" collection on each "Item",
# but not the "items" collection on "Order" - those
# remain lazily loaded.
query(Order).options(subqueryload(Order.items, Item.keywords))
# to subquery-load across both, use subqueryload_all()
query(Order).options(subqueryload_all(Order.items, Item.keywords))
See also: :func:`joinedload`, :func:`lazyload`
"""
return strategies.EagerLazyOption(keys, lazy="subquery")
def subqueryload_all(*keys):
"""Return a ``MapperOption`` that will convert all properties along the
given dot-separated path into a subquery eager load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
For example::
query.options(subqueryload_all('orders.items.keywords'))...
will set all of 'orders', 'orders.items', and 'orders.items.keywords' to
load in one subquery eager load.
Individual descriptors are accepted as arguments as well::
query.options(subqueryload_all(User.orders, Order.items,
Item.keywords))
See also: :func:`joinedload_all`, :func:`lazyload`, :func:`immediateload`
"""
return strategies.EagerLazyOption(keys, lazy="subquery", chained=True)
@sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated')
def lazyload(*keys):
"""Return a ``MapperOption`` that will convert the property of the given
name into a lazy load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
See also: :func:`eagerload`, :func:`subqueryload`, :func:`immediateload`
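    For example (entity and attribute names are illustrative)::
        session.query(User).options(lazyload(User.orders))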
"""
return strategies.EagerLazyOption(keys, lazy=True)
def noload(*keys):
"""Return a ``MapperOption`` that will convert the property of the
given name into a non-load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
See also: :func:`lazyload`, :func:`eagerload`, :func:`subqueryload`, :func:`immediateload`
"""
return strategies.EagerLazyOption(keys, lazy=None)
def immediateload(*keys):
"""Return a ``MapperOption`` that will convert the property of the given
name into an immediate load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
See also: :func:`lazyload`, :func:`eagerload`, :func:`subqueryload`
New as of version 0.6.5.
"""
return strategies.EagerLazyOption(keys, lazy='immediate')
def contains_alias(alias):
"""Return a ``MapperOption`` that will indicate to the query that
the main table has been aliased.
`alias` is the string name or ``Alias`` object representing the
alias.
"""
return AliasOption(alias)
@sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated')
def contains_eager(*keys, **kwargs):
"""Return a ``MapperOption`` that will indicate to the query that
the given attribute should be eagerly loaded from columns currently
in the query.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
The option is used in conjunction with an explicit join that loads
the desired rows, i.e.::
sess.query(Order).\\
join(Order.user).\\
options(contains_eager(Order.user))
The above query would join from the ``Order`` entity to its related
``User`` entity, and the returned ``Order`` objects would have the
``Order.user`` attribute pre-populated.
:func:`contains_eager` also accepts an `alias` argument, which is the
string name of an alias, an :func:`~sqlalchemy.sql.expression.alias`
construct, or an :func:`~sqlalchemy.orm.aliased` construct. Use this when
the eagerly-loaded rows are to come from an aliased table::
user_alias = aliased(User)
sess.query(Order).\\
join((user_alias, Order.user)).\\
options(contains_eager(Order.user, alias=user_alias))
See also :func:`eagerload` for the "automatic" version of this
functionality.
For additional examples of :func:`contains_eager` see
:ref:`contains_eager`.
"""
alias = kwargs.pop('alias', None)
if kwargs:
raise exceptions.ArgumentError('Invalid kwargs for contains_eag'
'er: %r' % kwargs.keys())
return strategies.EagerLazyOption(keys, lazy='joined',
propagate_to_loaders=False), \
strategies.LoadEagerFromAliasOption(keys, alias=alias)
@sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated')
def defer(*keys):
"""Return a ``MapperOption`` that will convert the column property of the
given name into a deferred load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
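For example (a hypothetical sketch, assuming a mapped ``Book`` class with a
large ``summary`` column)::
# don't load "summary" until it is first accessed
query(Book).options(defer('summary'))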
"""
return strategies.DeferredOption(keys, defer=True)
@sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated')
def undefer(*keys):
"""Return a ``MapperOption`` that will convert the column property of the
given name into a non-deferred (regular column) load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
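For example (a hypothetical sketch, assuming ``Book.summary`` is mapped as
deferred)::
# load the normally-deferred "summary" column up front
query(Book).options(undefer('summary'))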
"""
return strategies.DeferredOption(keys, defer=False)
def undefer_group(name):
"""Return a ``MapperOption`` that will convert the given group of deferred
column properties into a non-deferred (regular column) load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
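For example (a hypothetical sketch, assuming several columns were mapped
with ``deferred(..., group='photos')``)::
query(Book).options(undefer_group('photos'))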
"""
return strategies.UndeferGroupOption(name)
|
gpl-3.0
|
jbzdak/edx-platform
|
cms/envs/aws.py
|
1
|
15187
|
"""
This is the default template for our main set of AWS servers.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import json
from .common import *
from openedx.core.lib.logsettings import get_logger_config
import os
from path import Path as path
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is used, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
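# For example, with SERVICE_VARIANT="cms" the files read below are
# "cms.env.json" and "cms.auth.json"; with no variant they are plain
# "env.json" and "auth.json".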
############### ALWAYS THE SAME ################################
DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# IMPORTANT: With this enabled, the server must always be behind a proxy that
# strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise,
# a user can fool our server into thinking it was an https connection.
# See
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# for other warnings.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
###################################### CELERY ################################
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
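# For example, with CONFIG_PREFIX="cms." the default queue name resolves to
# "edx.cms.core.default".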
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############# NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
# STATIC_URL_BASE specifies the base url to use for static files
STATIC_URL_BASE = ENV_TOKENS.get('STATIC_URL_BASE', None)
if STATIC_URL_BASE:
# collectstatic will fail if STATIC_URL is a unicode string
STATIC_URL = STATIC_URL_BASE.encode('ascii')
if not STATIC_URL.endswith("/"):
STATIC_URL += "/"
STATIC_URL += EDX_PLATFORM_REVISION + "/"
# GITHUB_REPO_ROOT is the base directory
# for course data
GITHUB_REPO_ROOT = ENV_TOKENS.get('GITHUB_REPO_ROOT', GITHUB_REPO_ROOT)
# STATIC_ROOT specifies the directory where static files are
# collected
STATIC_ROOT_BASE = ENV_TOKENS.get('STATIC_ROOT_BASE', None)
if STATIC_ROOT_BASE:
STATIC_ROOT = path(STATIC_ROOT_BASE) / EDX_PLATFORM_REVISION
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
EMAIL_FILE_PATH = ENV_TOKENS.get('EMAIL_FILE_PATH', None)
EMAIL_HOST = ENV_TOKENS.get('EMAIL_HOST', EMAIL_HOST)
EMAIL_PORT = ENV_TOKENS.get('EMAIL_PORT', EMAIL_PORT)
EMAIL_USE_TLS = ENV_TOKENS.get('EMAIL_USE_TLS', EMAIL_USE_TLS)
LMS_BASE = ENV_TOKENS.get('LMS_BASE')
# Note that FEATURES['PREVIEW_LMS_BASE'] gets read in from the environment file.
SITE_NAME = ENV_TOKENS['SITE_NAME']
ALLOWED_HOSTS = [
# TODO: bbeggs remove this before prod, temp fix to get load testing running
"*",
ENV_TOKENS.get('CMS_BASE')
]
LOG_DIR = ENV_TOKENS['LOG_DIR']
CACHES = ENV_TOKENS['CACHES']
# Cache used for location mapping -- called many times with the same key/value
# in a given request.
if 'loc_cache' not in CACHES:
CACHES['loc_cache'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
}
SESSION_COOKIE_DOMAIN = ENV_TOKENS.get('SESSION_COOKIE_DOMAIN')
SESSION_COOKIE_HTTPONLY = ENV_TOKENS.get('SESSION_COOKIE_HTTPONLY', True)
SESSION_ENGINE = ENV_TOKENS.get('SESSION_ENGINE', SESSION_ENGINE)
SESSION_COOKIE_SECURE = ENV_TOKENS.get('SESSION_COOKIE_SECURE', SESSION_COOKIE_SECURE)
SESSION_SAVE_EVERY_REQUEST = ENV_TOKENS.get('SESSION_SAVE_EVERY_REQUEST', SESSION_SAVE_EVERY_REQUEST)
# social sharing settings
SOCIAL_SHARING_SETTINGS = ENV_TOKENS.get('SOCIAL_SHARING_SETTINGS', SOCIAL_SHARING_SETTINGS)
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if ENV_TOKENS.get('SESSION_COOKIE_NAME', None):
# NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this being a str()
SESSION_COOKIE_NAME = str(ENV_TOKENS.get('SESSION_COOKIE_NAME'))
# Set the names of cookies shared with the marketing site
# These have the same cookie domain as the session, which in production
# usually includes subdomains.
EDXMKTG_LOGGED_IN_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_LOGGED_IN_COOKIE_NAME', EDXMKTG_LOGGED_IN_COOKIE_NAME)
EDXMKTG_USER_INFO_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_USER_INFO_COOKIE_NAME', EDXMKTG_USER_INFO_COOKIE_NAME)
#Email overrides
DEFAULT_FROM_EMAIL = ENV_TOKENS.get('DEFAULT_FROM_EMAIL', DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = ENV_TOKENS.get('DEFAULT_FEEDBACK_EMAIL', DEFAULT_FEEDBACK_EMAIL)
ADMINS = ENV_TOKENS.get('ADMINS', ADMINS)
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', SERVER_EMAIL)
MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS)
TECH_SUPPORT_EMAIL = ENV_TOKENS.get('TECH_SUPPORT_EMAIL', TECH_SUPPORT_EMAIL)
COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
ASSET_IGNORE_REGEX = ENV_TOKENS.get('ASSET_IGNORE_REGEX', ASSET_IGNORE_REGEX)
# Theme overrides
THEME_NAME = ENV_TOKENS.get('THEME_NAME', None)
#Timezone overrides
TIME_ZONE = ENV_TOKENS.get('TIME_ZONE', TIME_ZONE)
# Push to LMS overrides
GIT_REPO_EXPORT_DIR = ENV_TOKENS.get('GIT_REPO_EXPORT_DIR', '/edx/var/edxapp/export_course_repos')
# Translation overrides
LANGUAGES = ENV_TOKENS.get('LANGUAGES', LANGUAGES)
LANGUAGE_CODE = ENV_TOKENS.get('LANGUAGE_CODE', LANGUAGE_CODE)
USE_I18N = ENV_TOKENS.get('USE_I18N', USE_I18N)
ENV_FEATURES = ENV_TOKENS.get('FEATURES', {})
for feature, value in ENV_FEATURES.items():
FEATURES[feature] = value
# Additional installed apps
for app in ENV_TOKENS.get('ADDL_INSTALLED_APPS', []):
INSTALLED_APPS += (app,)
WIKI_ENABLED = ENV_TOKENS.get('WIKI_ENABLED', WIKI_ENABLED)
LOGGING = get_logger_config(LOG_DIR,
logging_env=ENV_TOKENS['LOGGING_ENV'],
debug=False,
service_variant=SERVICE_VARIANT)
#theming start:
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'edX')
STUDIO_NAME = ENV_TOKENS.get('STUDIO_NAME', 'edX Studio')
STUDIO_SHORT_NAME = ENV_TOKENS.get('STUDIO_SHORT_NAME', 'Studio')
# Event Tracking
if "TRACKING_IGNORE_URL_PATTERNS" in ENV_TOKENS:
TRACKING_IGNORE_URL_PATTERNS = ENV_TOKENS.get("TRACKING_IGNORE_URL_PATTERNS")
# Django CAS external authentication settings
CAS_EXTRA_LOGIN_PARAMS = ENV_TOKENS.get("CAS_EXTRA_LOGIN_PARAMS", None)
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = ENV_TOKENS.get("CAS_SERVER_URL", None)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
CAS_ATTRIBUTE_CALLBACK = ENV_TOKENS.get('CAS_ATTRIBUTE_CALLBACK', None)
if CAS_ATTRIBUTE_CALLBACK:
import importlib
CAS_USER_DETAILS_RESOLVER = getattr(
importlib.import_module(CAS_ATTRIBUTE_CALLBACK['module']),
CAS_ATTRIBUTE_CALLBACK['function']
)
################ SECURE AUTH ITEMS ###############################
# Secret things: passwords, access keys, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
AUTH_TOKENS = json.load(auth_file)
############### XBlock filesystem field config ##########
if 'DJFS' in AUTH_TOKENS and AUTH_TOKENS['DJFS'] is not None:
DJFS = AUTH_TOKENS['DJFS']
if 'url_root' in DJFS:
DJFS['url_root'] = DJFS['url_root'].format(platform_revision=EDX_PLATFORM_REVISION)
EMAIL_HOST_USER = AUTH_TOKENS.get('EMAIL_HOST_USER', EMAIL_HOST_USER)
EMAIL_HOST_PASSWORD = AUTH_TOKENS.get('EMAIL_HOST_PASSWORD', EMAIL_HOST_PASSWORD)
# Note that this is the Studio key for Segment. There is a separate key for the LMS.
CMS_SEGMENT_KEY = AUTH_TOKENS.get('SEGMENT_KEY')
SECRET_KEY = AUTH_TOKENS['SECRET_KEY']
AWS_ACCESS_KEY_ID = AUTH_TOKENS["AWS_ACCESS_KEY_ID"]
if AWS_ACCESS_KEY_ID == "":
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"]
if AWS_SECRET_ACCESS_KEY == "":
AWS_SECRET_ACCESS_KEY = None
if AUTH_TOKENS.get('DEFAULT_FILE_STORAGE'):
DEFAULT_FILE_STORAGE = AUTH_TOKENS.get('DEFAULT_FILE_STORAGE')
elif AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY:
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
else:
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
DATABASES = AUTH_TOKENS['DATABASES']
# Enable automatic transaction management on all databases
# https://docs.djangoproject.com/en/1.8/topics/db/transactions/#tying-transactions-to-http-requests
# This needs to be true for all databases
for database_name in DATABASES:
DATABASES[database_name]['ATOMIC_REQUESTS'] = True
MODULESTORE = convert_module_store_setting_if_needed(AUTH_TOKENS.get('MODULESTORE', MODULESTORE))
CONTENTSTORE = AUTH_TOKENS['CONTENTSTORE']
DOC_STORE_CONFIG = AUTH_TOKENS['DOC_STORE_CONFIG']
# Datadog for events!
DATADOG = AUTH_TOKENS.get("DATADOG", {})
DATADOG.update(ENV_TOKENS.get("DATADOG", {}))
# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']
# Celery Broker
CELERY_ALWAYS_EAGER = ENV_TOKENS.get("CELERY_ALWAYS_EAGER", False)
CELERY_BROKER_TRANSPORT = ENV_TOKENS.get("CELERY_BROKER_TRANSPORT", "")
CELERY_BROKER_HOSTNAME = ENV_TOKENS.get("CELERY_BROKER_HOSTNAME", "")
CELERY_BROKER_VHOST = ENV_TOKENS.get("CELERY_BROKER_VHOST", "")
CELERY_BROKER_USER = AUTH_TOKENS.get("CELERY_BROKER_USER", "")
CELERY_BROKER_PASSWORD = AUTH_TOKENS.get("CELERY_BROKER_PASSWORD", "")
BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
CELERY_BROKER_USER,
CELERY_BROKER_PASSWORD,
CELERY_BROKER_HOSTNAME,
CELERY_BROKER_VHOST)
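# The resulting URL has the shape "transport://user:password@hostname/vhost",
# e.g. "amqp://celery:secret@broker.example.com/cms" (illustrative values only).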
# Event tracking
TRACKING_BACKENDS.update(AUTH_TOKENS.get("TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['tracking_logs']['OPTIONS']['backends'].update(AUTH_TOKENS.get("EVENT_TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['segmentio']['OPTIONS']['processors'][0]['OPTIONS']['whitelist'].extend(
AUTH_TOKENS.get("EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST", []))
SUBDOMAIN_BRANDING = ENV_TOKENS.get('SUBDOMAIN_BRANDING', {})
VIRTUAL_UNIVERSITIES = ENV_TOKENS.get('VIRTUAL_UNIVERSITIES', [])
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", 5)
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", 15 * 60)
MICROSITE_CONFIGURATION = ENV_TOKENS.get('MICROSITE_CONFIGURATION', {})
MICROSITE_ROOT_DIR = path(ENV_TOKENS.get('MICROSITE_ROOT_DIR', ''))
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = ENV_TOKENS.get("PASSWORD_MIN_LENGTH")
PASSWORD_MAX_LENGTH = ENV_TOKENS.get("PASSWORD_MAX_LENGTH")
PASSWORD_COMPLEXITY = ENV_TOKENS.get("PASSWORD_COMPLEXITY", {})
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = ENV_TOKENS.get("PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD")
PASSWORD_DICTIONARY = ENV_TOKENS.get("PASSWORD_DICTIONARY", [])
### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = AUTH_TOKENS.get("SESSION_INACTIVITY_TIMEOUT_IN_SECONDS")
##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = ENV_TOKENS.get('X_FRAME_OPTIONS', X_FRAME_OPTIONS)
##### ADVANCED_SECURITY_CONFIG #####
ADVANCED_SECURITY_CONFIG = ENV_TOKENS.get('ADVANCED_SECURITY_CONFIG', {})
################ ADVANCED COMPONENT/PROBLEM TYPES ###############
ADVANCED_COMPONENT_TYPES = ENV_TOKENS.get('ADVANCED_COMPONENT_TYPES', ADVANCED_COMPONENT_TYPES)
ADVANCED_PROBLEM_TYPES = ENV_TOKENS.get('ADVANCED_PROBLEM_TYPES', ADVANCED_PROBLEM_TYPES)
DEPRECATED_ADVANCED_COMPONENT_TYPES = ENV_TOKENS.get(
'DEPRECATED_ADVANCED_COMPONENT_TYPES', DEPRECATED_ADVANCED_COMPONENT_TYPES
)
################ VIDEO UPLOAD PIPELINE ###############
VIDEO_UPLOAD_PIPELINE = ENV_TOKENS.get('VIDEO_UPLOAD_PIPELINE', VIDEO_UPLOAD_PIPELINE)
################ PUSH NOTIFICATIONS ###############
PARSE_KEYS = AUTH_TOKENS.get("PARSE_KEYS", {})
# Video Caching. Pairing country codes with CDN URLs.
# Example: {'CN': 'http://api.xuetangx.com/edx/video?s3_url='}
VIDEO_CDN_URL = ENV_TOKENS.get('VIDEO_CDN_URL', {})
if FEATURES['ENABLE_COURSEWARE_INDEX'] or FEATURES['ENABLE_LIBRARY_INDEX']:
# Use ElasticSearch for the search engine
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
XBLOCK_SETTINGS = ENV_TOKENS.get('XBLOCK_SETTINGS', {})
XBLOCK_SETTINGS.setdefault("VideoDescriptor", {})["licensing_enabled"] = FEATURES.get("LICENSING", False)
XBLOCK_SETTINGS.setdefault("VideoModule", {})['YOUTUBE_API_KEY'] = AUTH_TOKENS.get('YOUTUBE_API_KEY', YOUTUBE_API_KEY)
################# PROCTORING CONFIGURATION ##################
PROCTORING_BACKEND_PROVIDER = AUTH_TOKENS.get("PROCTORING_BACKEND_PROVIDER", PROCTORING_BACKEND_PROVIDER)
PROCTORING_SETTINGS = ENV_TOKENS.get("PROCTORING_SETTINGS", PROCTORING_SETTINGS)
############################ OAUTH2 Provider ###################################
# OpenID Connect issuer ID. Normally the URL of the authentication endpoint.
OAUTH_OIDC_ISSUER = ENV_TOKENS['OAUTH_OIDC_ISSUER']
|
agpl-3.0
|
osiell/server-tools
|
base_export_manager/tests/test_ir_exports_line.py
|
9
|
3859
|
# -*- coding: utf-8 -*-
# © 2015 Antiun Ingenieria S.L. - Javier Iniesta
# Copyright 2016 Pedro M. Baeza <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.tests.common import TransactionCase
from openerp.exceptions import ValidationError
class TestIrExportsLineCase(TransactionCase):
def setUp(self):
super(TestIrExportsLineCase, self).setUp()
m_ir_exports = self.env['ir.exports']
self.export = m_ir_exports.create({'name': 'Partner Test',
'resource': 'res.partner'})
self.partner_model = self.env['ir.model'].search(
[('model', '=', 'res.partner')])
self.field_parent_id = self.env['ir.model.fields'].search(
[('name', '=', 'parent_id'),
('model_id', '=', self.partner_model.id)])
self.field_name = self.env['ir.model.fields'].search(
[('name', '=', 'name'),
('model_id', '=', self.partner_model.id)])
def test_check_name(self):
m_ir_exports_line = self.env['ir.exports.line']
m_ir_exports_line.create({'name': 'name',
'export_id': self.export.id})
with self.assertRaises(ValidationError):
m_ir_exports_line.create({'name': 'name',
'export_id': self.export.id})
with self.assertRaises(ValidationError):
m_ir_exports_line.create({'name': 'bad_error_name',
'export_id': self.export.id})
def test_get_label_string(self):
m_ir_exports_line = self.env['ir.exports.line']
export_line = m_ir_exports_line.create({'name': 'parent_id/name',
'export_id': self.export.id})
self.assertEqual(export_line.with_context(lang="en_US").label,
"Related Company/Name (parent_id/name)")
with self.assertRaises(ValidationError):
m_ir_exports_line.create({'name': '',
'export_id': self.export.id})
def test_model_default_by_context(self):
"""Fields inherit the model_id by context."""
line = self.env["ir.exports.line"].with_context(
default_model1_id=self.export.model_id.id).create({
"name": "name",
"export_id": self.export.id,
})
self.assertEqual(line.model1_id, self.export.model_id)
def test_inverse_name(self):
line = self.env['ir.exports.line'].create({
'export_id': self.export.id,
'name': 'parent_id/parent_id/parent_id/name',
})
self.assertEqual(line.model1_id, self.partner_model)
self.assertEqual(line.model2_id, self.partner_model)
self.assertEqual(line.field1_id, self.field_parent_id)
self.assertEqual(line.field2_id, self.field_parent_id)
self.assertEqual(line.field3_id, self.field_parent_id)
self.assertEqual(line.field4_id, self.field_name)
def test_compute_name(self):
line = self.env['ir.exports.line'].create({
'export_id': self.export.id,
'field1_id': self.field_parent_id.id,
'field2_id': self.field_parent_id.id,
'field3_id': self.field_parent_id.id,
'field4_id': self.field_name.id,
})
self.assertEqual(line.name, 'parent_id/parent_id/parent_id/name')
def test_write_name_same_root(self):
self.env['ir.exports.line'].create({
'export_id': self.export.id,
'name': 'parent_id',
})
line = self.env['ir.exports.line'].create({
'export_id': self.export.id,
'name': 'name',
})
# This should end without errors
line.name = 'parent_id/name'
|
agpl-3.0
|
luotao19861229/pyspider
|
pyspider/webui/result.py
|
11
|
1803
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-10-19 16:23:55
from __future__ import unicode_literals
from flask import render_template, request, json
from flask import Response
from .app import app
from pyspider.libs import result_dump
@app.route('/results')
def result():
resultdb = app.config['resultdb']
project = request.args.get('project')
offset = int(request.args.get('offset', 0))
limit = int(request.args.get('limit', 20))
count = resultdb.count(project)
results = list(resultdb.select(project, offset=offset, limit=limit))
return render_template(
"result.html", count=count, results=results,
result_formater=result_dump.result_formater,
project=project, offset=offset, limit=limit, json=json
)
@app.route('/results/dump/<project>.<_format>')
def dump_result(project, _format):
resultdb = app.config['resultdb']
# force update project list
resultdb.get(project, 'any')
if project not in resultdb.projects:
return "no such project.", 404
offset = int(request.args.get('offset', 0)) or None
limit = int(request.args.get('limit', 0)) or None
results = resultdb.select(project, offset=offset, limit=limit)
if _format == 'json':
valid = request.args.get('style', 'rows') == 'full'
return Response(result_dump.dump_as_json(results, valid),
mimetype='application/json')
elif _format == 'txt':
return Response(result_dump.dump_as_txt(results),
mimetype='text/plain')
elif _format == 'csv':
return Response(result_dump.dump_as_csv(results),
mimetype='text/csv')
|
apache-2.0
|
vnsofthe/odoo
|
addons/product/wizard/__init__.py
|
452
|
1076
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_price
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
shinymayhem/kubernetes
|
cluster/juju/return-node-ips.py
|
310
|
1024
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
# This script helps parse out the private IP addresses from the
# `juju run` command's JSON object, see cluster/juju/util.sh
if len(sys.argv) > 1:
# It takes the JSON output as the first argument.
nodes = json.loads(sys.argv[1])
# There can be multiple nodes to print the Stdout.
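# Each entry is expected to expose the command output under a 'Stdout' key,
# e.g. {"Stdout": "10.245.0.5\n"} (illustrative value).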
for num in nodes:
print num['Stdout'].rstrip()
else:
exit(1)
|
apache-2.0
|
boomcan90/store
|
tests/test_order_consists_of.py
|
4
|
1190
|
# # -*- coding: utf-8 -*-
# """Functional tests using WebTest.
# See: http://webtest.readthedocs.org/
# TESTS MUST START WITH "test"
# """
# from flask import url_for
# class TestBreakTheOrder:
# """
# Breaking the order
# """
# def test_order_conists_of_is_not_not_found(self, testapp):
# # !!! URL needs the / at the end.
# res = testapp.get('/consistsof/')
# assert res.status_code != 404
# def test_OrderConsistsOf_is_accessible(self, testapp):
# # testapp made available from the tests module
# res = testapp.get('/consistsof/')
# assert res.status_code == 200
# def test_OrderConsistsOf_has_list_of_not_stuff(self, testapp):
# res = testapp.get('/consistsof/consistsof')
#     # I have discovered that "string" in res is case sensitive
# # in general to know more see:
# # http://webtest.readthedocs.io/en/latest/api.html#webtest-response-testresponse
# assert "List of NOT beer NOR ORDERS OR BOOKS" in res
# def test_browse_consists_of_returns_empty_list(self, order, testapp):
# res = testapp.get('/consistsof/ordersList')
# assert "data" in res
|
bsd-3-clause
|
Kagee/youtube-dl
|
youtube_dl/extractor/dailymotion.py
|
1
|
9120
|
# coding: utf-8
from __future__ import unicode_literals
import re
import json
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
int_or_none,
orderedSet,
str_to_int,
unescapeHTML,
)
class DailymotionBaseInfoExtractor(InfoExtractor):
@staticmethod
def _build_request(url):
"""Build a request with the family filter disabled"""
request = compat_urllib_request.Request(url)
request.add_header('Cookie', 'family_filter=off')
request.add_header('Cookie', 'ff=off')
return request
class DailymotionIE(DailymotionBaseInfoExtractor):
"""Information Extractor for Dailymotion"""
_VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
IE_NAME = 'dailymotion'
_FORMATS = [
('stream_h264_ld_url', 'ld'),
('stream_h264_url', 'standard'),
('stream_h264_hq_url', 'hq'),
('stream_h264_hd_url', 'hd'),
('stream_h264_hd1080_url', 'hd180'),
]
_TESTS = [
{
'url': 'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
'md5': '392c4b85a60a90dc4792da41ce3144eb',
'info_dict': {
'id': 'x33vw9',
'ext': 'mp4',
'uploader': 'Amphora Alex and Van .',
'title': 'Tutoriel de Youtubeur"DL DES VIDEO DE YOUTUBE"',
}
},
# Vevo video
{
'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
'info_dict': {
'title': 'Roar (Official)',
'id': 'USUV71301934',
'ext': 'mp4',
'uploader': 'Katy Perry',
'upload_date': '20130905',
},
'params': {
'skip_download': True,
},
'skip': 'VEVO is only available in some countries',
},
# age-restricted video
{
'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
'md5': '0d667a7b9cebecc3c89ee93099c4159d',
'info_dict': {
'id': 'xyh2zz',
'ext': 'mp4',
'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
'uploader': 'HotWaves1012',
'age_limit': 18,
}
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
url = 'http://www.dailymotion.com/video/%s' % video_id
# Retrieve video webpage to extract further information
request = self._build_request(url)
webpage = self._download_webpage(request, video_id)
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
# It may just embed a vevo video:
m_vevo = re.search(
r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
webpage)
if m_vevo is not None:
vevo_id = m_vevo.group('id')
self.to_screen('Vevo video detected: %s' % vevo_id)
return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
age_limit = self._rta_search(webpage)
video_upload_date = None
mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
if mobj is not None:
video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
embed_page = self._download_webpage(embed_url, video_id,
'Downloading embed page')
info = self._search_regex(r'var info = ({.*?}),$', embed_page,
'video info', flags=re.MULTILINE)
info = json.loads(info)
if info.get('error') is not None:
msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
raise ExtractorError(msg, expected=True)
formats = []
for (key, format_id) in self._FORMATS:
video_url = info.get(key)
if video_url is not None:
m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
if m_size is not None:
width, height = map(int_or_none, (m_size.group(1), m_size.group(2)))
else:
width, height = None, None
formats.append({
'url': video_url,
'ext': 'mp4',
'format_id': format_id,
'width': width,
'height': height,
})
if not formats:
raise ExtractorError('Unable to extract video URL')
# subtitles
video_subtitles = self.extract_subtitles(video_id, webpage)
view_count = str_to_int(self._search_regex(
r'video_views_count[^>]+>\s+([\d\.,]+)',
webpage, 'view count', fatal=False))
title = self._og_search_title(webpage, default=None)
if title is None:
title = self._html_search_regex(
r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
'title')
return {
'id': video_id,
'formats': formats,
'uploader': info['owner.screenname'],
'upload_date': video_upload_date,
'title': title,
'subtitles': video_subtitles,
'thumbnail': info['thumbnail_url'],
'age_limit': age_limit,
'view_count': view_count,
}
def _get_subtitles(self, video_id, webpage):
try:
sub_list = self._download_webpage(
'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
return {}
info = json.loads(sub_list)
if (info['total'] > 0):
sub_lang_list = dict((l['language'], [{'url': l['url'], 'ext': 'srt'}]) for l in info['list'])
return sub_lang_list
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
IE_NAME = 'dailymotion:playlist'
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
_MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
_PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
_TESTS = [{
'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
'info_dict': {
'title': 'SPORT',
'id': 'xv4bw_nqtv_sport',
},
'playlist_mincount': 20,
}]
def _extract_entries(self, id):
video_ids = []
for pagenum in itertools.count(1):
request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
webpage = self._download_webpage(request,
id, 'Downloading page %s' % pagenum)
video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage))
if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
break
return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
for video_id in orderedSet(video_ids)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(url, playlist_id)
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'entries': self._extract_entries(playlist_id),
}
class DailymotionUserIE(DailymotionPlaylistIE):
IE_NAME = 'dailymotion:user'
_VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
_PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
_TESTS = [{
'url': 'https://www.dailymotion.com/user/nqtv',
'info_dict': {
'id': 'nqtv',
'title': 'Rémi Gaillard',
},
'playlist_mincount': 100,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user = mobj.group('user')
webpage = self._download_webpage(url, user)
full_user = unescapeHTML(self._html_search_regex(
r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
webpage, 'user'))
return {
'_type': 'playlist',
'id': user,
'title': full_user,
'entries': self._extract_entries(user),
}
|
unlicense
|
alx-eu/django
|
django/contrib/admin/views/decorators.py
|
230
|
1286
|
from functools import wraps
from django.utils.translation import ugettext as _
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth.views import login
from django.contrib.auth import REDIRECT_FIELD_NAME
def staff_member_required(view_func):
"""
Decorator for views that checks that the user is logged in and is a staff
member, displaying the login page if necessary.
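For example (a hypothetical view)::
@staff_member_required
def my_admin_view(request):
    ...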
"""
@wraps(view_func)
def _checklogin(request, *args, **kwargs):
if request.user.is_active and request.user.is_staff:
# The user is valid. Continue to the admin page.
return view_func(request, *args, **kwargs)
assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
defaults = {
'template_name': 'admin/login.html',
'authentication_form': AdminAuthenticationForm,
'extra_context': {
'title': _('Log in'),
'app_path': request.get_full_path(),
REDIRECT_FIELD_NAME: request.get_full_path(),
},
}
return login(request, **defaults)
return _checklogin
|
bsd-3-clause
|
130s/ros_buildfarm
|
scripts/devel/catkin_make_isolated_and_test.py
|
3
|
2508
|
#!/usr/bin/env python3
import argparse
import os
import sys
from ros_buildfarm.catkin_workspace import call_catkin_make_isolated
from ros_buildfarm.catkin_workspace import clean_workspace
from ros_buildfarm.catkin_workspace import ensure_workspace_exists
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Invoke 'catkin_make_isolated' on a workspace while "
"enabling and running the tests")
parser.add_argument(
'--rosdistro-name',
required=True,
help='The name of the ROS distro to identify the setup file to be '
'sourced (if available)')
parser.add_argument(
'--workspace-root',
required=True,
help='The root path of the workspace to compile')
parser.add_argument(
'--parent-result-space',
help='The path of the parent result space')
parser.add_argument(
'--clean-before',
action='store_true',
help='The flag if the workspace should be cleaned before the '
'invocation')
parser.add_argument(
'--clean-after',
action='store_true',
help='The flag if the workspace should be cleaned after the '
'invocation')
args = parser.parse_args(argv)
ensure_workspace_exists(args.workspace_root)
if args.clean_before:
clean_workspace(args.workspace_root)
try:
test_results_dir = os.path.join(args.workspace_root, 'test_results')
arguments = [
'--cmake-args', '-DCATKIN_ENABLE_TESTING=1',
'-DCATKIN_SKIP_TESTING=0',
'-DCATKIN_TEST_RESULTS_DIR=%s' % test_results_dir,
'--catkin-make-args', '-j1']
rc = call_catkin_make_isolated(
args.rosdistro_name, args.workspace_root,
arguments,
parent_result_space=args.parent_result_space)
if not rc:
rc = call_catkin_make_isolated(
args.rosdistro_name, args.workspace_root,
arguments + ['tests'],
parent_result_space=args.parent_result_space)
if not rc:
rc = call_catkin_make_isolated(
args.rosdistro_name, args.workspace_root,
arguments + ['run_tests'],
parent_result_space=args.parent_result_space)
finally:
if args.clean_after:
clean_workspace(args.workspace_root)
return rc
if __name__ == '__main__':
sys.exit(main())
|
apache-2.0
|
caveman-dick/ansible
|
test/units/modules/network/iosxr/iosxr_module.py
|
56
|
3259
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
def set_module_args(args):
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args)
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except:
pass
fixture_data[path] = data
return data
class AnsibleExitJson(Exception):
pass
class AnsibleFailJson(Exception):
pass
class TestIosxrModule(unittest.TestCase):
def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
self.load_fixtures(commands)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if commands is not None:
if sort:
self.assertEqual(sorted(commands), sorted(result['commands']), result['commands'])
else:
self.assertEqual(commands, result['commands'], result['commands'])
return result
def failed(self):
def fail_json(*args, **kwargs):
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
with patch.object(basic.AnsibleModule, 'fail_json', fail_json):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
def exit_json(*args, **kwargs):
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
with patch.object(basic.AnsibleModule, 'exit_json', exit_json):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def load_fixtures(self, commands=None):
pass
|
gpl-3.0
|
t-artistik/browserscope
|
base/summary_test_set.py
|
9
|
2474
|
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark Tests Definitions."""
import logging
from categories import test_set_base
from categories import all_test_sets
class SummaryTest(test_set_base.TestBase):
def __init__(self, category, category_name):
test_set_base.TestBase.__init__(
self,
key=category,
name=category_name,
url=None,
doc=None,
min_value=0,
max_value=0)
_TESTS = []
for test_set in all_test_sets.GetVisibleTestSets():
_TESTS.append(SummaryTest(test_set.category, test_set.category_name))
class SummaryTestSet(test_set_base.TestSet):
def GetTestScoreAndDisplayValue(self, test_key, raw_scores):
"""Get a normalized score (0 to 100) and a value to output to the display.
Args:
test_key: a key for a test_set test.
raw_scores: a dict of raw_scores indexed by test keys.
Returns:
score, display_value
# score is from 0 to 100.
# display_value is the text for the cell.
"""
score = raw_scores[test_key]
return score, score
def GetRowScoreAndDisplayValue(self, results):
"""Get the overall score for this row of results data.
Args:
results: A dictionary that looks like:
{
'testkey1': {'score': 1-10, 'median': median, 'display': 'celltext'},
'testkey2': {'score': 1-10, 'median': median, 'display': 'celltext'},
etc...
}
Returns:
A tuple of (score, display)
Where score is a value between 1-100.
And display is the text for the cell.
"""
logging.info('summary getrowscore results: %s' % results)
if not results.has_key('score') or results['score']['median'] is None:
score = 0
else:
score = results['score']['median']
return score, score
TEST_SET = SummaryTestSet(
category='summary',
category_name='Summary',
tests=_TESTS,
test_page=''
)
|
apache-2.0
|
MoserMichael/cstuff
|
python-pypi/ntrace/setup.py
|
1
|
1131
|
import os
from setuptools import setup
from setuptools import Extension
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(
name = "ntrace",
version = "0.0.1",
author = "Michael Moser",
author_email = "[email protected]",
description = ("execution tracer that gives more output than trace.py"),
license = "BSD",
keywords = "program tracing, debugging",
url = "http://mosermichael.github.io/cstuff/all/projects/2015/02/24/python-backtrace.html",
packages=['ntrace', 'tests'],
long_description=read('README'),
ext_modules = [Extension(name = "ntracenative",
sources = ["ntrace/ntracenativemodule.c"])],
classifiers=[
"Topic :: Software Development :: Debuggers",
"Programming Language :: Python :: 2.7",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"License :: OSI Approved :: BSD License",
],
)
|
bsd-3-clause
|
quamilek/django
|
tests/file_uploads/views.py
|
298
|
5093
|
from __future__ import unicode_literals
import contextlib
import hashlib
import json
import os
from django.core.files.uploadedfile import UploadedFile
from django.http import HttpResponse, HttpResponseServerError
from django.utils import six
from django.utils.encoding import force_bytes, smart_str
from .models import FileModel
from .tests import UNICODE_FILENAME, UPLOAD_TO
from .uploadhandler import ErroringUploadHandler, QuotaUploadHandler
def file_upload_view(request):
"""
Check that a file upload can be updated into the POST dictionary without
going pear-shaped.
"""
form_data = request.POST.copy()
form_data.update(request.FILES)
if isinstance(form_data.get('file_field'), UploadedFile) and isinstance(form_data['name'], six.text_type):
# If a file is posted, the dummy client should only post the file name,
# not the full path.
if os.path.dirname(form_data['file_field'].name) != '':
return HttpResponseServerError()
return HttpResponse('')
else:
return HttpResponseServerError()
def file_upload_view_verify(request):
"""
Use the sha digest hash to verify the uploaded contents.
"""
form_data = request.POST.copy()
form_data.update(request.FILES)
for key, value in form_data.items():
if key.endswith('_hash'):
continue
if key + '_hash' not in form_data:
continue
submitted_hash = form_data[key + '_hash']
if isinstance(value, UploadedFile):
new_hash = hashlib.sha1(value.read()).hexdigest()
else:
new_hash = hashlib.sha1(force_bytes(value)).hexdigest()
if new_hash != submitted_hash:
return HttpResponseServerError()
# Adding large file to the database should succeed
largefile = request.FILES['file_field2']
obj = FileModel()
obj.testfile.save(largefile.name, largefile)
return HttpResponse('')
def file_upload_unicode_name(request):
# Check to see if unicode name came through properly.
if not request.FILES['file_unicode'].name.endswith(UNICODE_FILENAME):
return HttpResponseServerError()
response = None
# Check to make sure the exotic characters are preserved even
# through file save.
uni_named_file = request.FILES['file_unicode']
obj = FileModel.objects.create(testfile=uni_named_file)
full_name = '%s/%s' % (UPLOAD_TO, uni_named_file.name)
if not os.path.exists(full_name):
response = HttpResponseServerError()
# Cleanup the object with its exotic file name immediately.
# (shutil.rmtree used elsewhere in the tests to clean up the
# upload directory has been seen to choke on unicode
# filenames on Windows.)
obj.delete()
os.unlink(full_name)
if response:
return response
else:
return HttpResponse('')
def file_upload_echo(request):
"""
Simple view to echo back info about uploaded files for tests.
"""
r = {k: f.name for k, f in request.FILES.items()}
return HttpResponse(json.dumps(r))
def file_upload_echo_content(request):
"""
Simple view to echo back the content of uploaded files for tests.
"""
def read_and_close(f):
with contextlib.closing(f):
return f.read().decode('utf-8')
r = {k: read_and_close(f) for k, f in request.FILES.items()}
return HttpResponse(json.dumps(r))
def file_upload_quota(request):
"""
Dynamically add in an upload handler.
"""
request.upload_handlers.insert(0, QuotaUploadHandler())
return file_upload_echo(request)
def file_upload_quota_broken(request):
"""
You can't change handlers after reading FILES; this view shouldn't work.
"""
response = file_upload_echo(request)
request.upload_handlers.insert(0, QuotaUploadHandler())
return response
def file_upload_getlist_count(request):
"""
Check the .getlist() function to ensure we receive the correct number of files.
"""
file_counts = {}
for key in request.FILES.keys():
file_counts[key] = len(request.FILES.getlist(key))
return HttpResponse(json.dumps(file_counts))
def file_upload_errors(request):
request.upload_handlers.insert(0, ErroringUploadHandler())
return file_upload_echo(request)
def file_upload_filename_case_view(request):
"""
Check adding the file to the database will preserve the filename case.
"""
file = request.FILES['file_field']
obj = FileModel()
obj.testfile.save(file.name, file)
return HttpResponse('%d' % obj.pk)
def file_upload_content_type_extra(request):
"""
Simple view to echo back extra content-type parameters.
"""
params = {}
for file_name, uploadedfile in request.FILES.items():
params[file_name] = {
k: smart_str(v) for k, v in uploadedfile.content_type_extra.items()
}
return HttpResponse(json.dumps(params))
def file_upload_fd_closing(request, access):
if access == 't':
request.FILES # Trigger file parsing.
return HttpResponse('')
|
bsd-3-clause
|
kevin-coder/tensorflow-fork
|
tensorflow/contrib/learn/python/learn/learn_io/dask_io.py
|
42
|
4229
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow dask.DataFrame (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.util.deprecation import deprecated
try:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
allowed_classes = (dd.Series, dd.DataFrame)
HAS_DASK = True
except ImportError:
HAS_DASK = False
def _add_to_index(df, start):
"""New dask.dataframe with values added to index of each subdataframe."""
df = df.copy()
df.index += start
return df
def _get_divisions(df):
"""Number of rows in each sub-dataframe."""
lengths = df.map_partitions(len).compute()
divisions = np.cumsum(lengths).tolist()
divisions.insert(0, 0)
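# For example, partition lengths [3, 2, 4] yield divisions [0, 3, 5, 9]
# (illustrative values).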
return divisions
def _construct_dask_df_with_divisions(df):
"""Construct the new task graph and make a new dask.dataframe around it."""
divisions = _get_divisions(df)
# pylint: disable=protected-access
name = 'csv-index' + df._name
dsk = {(name, i): (_add_to_index, (df._name, i), divisions[i])
for i in range(df.npartitions)}
# pylint: enable=protected-access
from toolz import merge # pylint: disable=g-import-not-at-top
if isinstance(df, dd.DataFrame):
return dd.DataFrame(merge(dsk, df.dask), name, df.columns, divisions)
elif isinstance(df, dd.Series):
return dd.Series(merge(dsk, df.dask), name, df.name, divisions)
@deprecated(None, 'Please feed input to tf.data to support dask.')
def extract_dask_data(data):
"""Extract data from dask.Series or dask.DataFrame for predictors.
Given a distributed dask.DataFrame or dask.Series containing columns or names
for one or more predictors, this operation returns a single dask.DataFrame or
dask.Series that can be iterated over.
Args:
data: A distributed dask.DataFrame or dask.Series.
Returns:
A dask.DataFrame or dask.Series that can be iterated over.
If the supplied argument is neither a dask.DataFrame nor a dask.Series this
operation returns it without modification.
"""
if isinstance(data, allowed_classes):
return _construct_dask_df_with_divisions(data)
else:
return data
@deprecated(None, 'Please feed input to tf.data to support dask.')
def extract_dask_labels(labels):
"""Extract data from dask.Series or dask.DataFrame for labels.
Given a distributed dask.DataFrame or dask.Series containing exactly one
column or name, this operation returns a single dask.DataFrame or dask.Series
that can be iterated over.
Args:
labels: A distributed dask.DataFrame or dask.Series with exactly one
column or name.
Returns:
A dask.DataFrame or dask.Series that can be iterated over.
If the supplied argument is neither a dask.DataFrame nor a dask.Series this
operation returns it without modification.
Raises:
ValueError: If the supplied dask.DataFrame contains more than one
column or the supplied dask.Series contains more than
one name.
"""
if isinstance(labels, dd.DataFrame):
ncol = labels.columns
elif isinstance(labels, dd.Series):
ncol = labels.name
if isinstance(labels, allowed_classes):
if len(ncol) > 1:
raise ValueError('Only one column for labels is allowed.')
return _construct_dask_df_with_divisions(labels)
else:
return labels
|
apache-2.0
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/authentication/__init__.py
|
1
|
11770
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class authentication(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/authentication. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines authentication information of the
node.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "authentication"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"authentication",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/authentication/state (container)
YANG Description: State parameters of TLV 10.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/authentication/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of TLV 10.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
from . import state
class authentication(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/authentication. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines authentication information of the
node.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "authentication"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"authentication",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/authentication/state (container)
YANG Description: State parameters of TLV 10.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/authentication/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered a private
method. Backends looking to populate this variable should
do so by calling thisObj._set_state() directly.
YANG Description: State parameters of TLV 10.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
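# Illustrative sketch (not part of the generated bindings): because `state` is
# config false, a backend populates it by calling the private setter directly,
# as the docstring above suggests. Assuming the sibling generated `state.state`
# class, that looks roughly like:
#
#     tlv_auth = authentication()
#     tlv_auth._set_state(state.state())
#     tlv_auth._path()  # ['network-instances', ..., 'tlv', 'authentication']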
|
apache-2.0
|
bleachbit/bleachbit
|
tests/TestAll.py
|
1
|
1646
|
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2021 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Run all test suites
"""
import os
import unittest
import sys
import tempfile
import shutil
if __name__ == '__main__':
testdir = tempfile.mkdtemp(prefix='TestAll '+__name__)
os.environ['BLEACHBIT_TEST_OPTIONS_DIR'] = testdir
print("""You should use the unittest discovery, it's much nicer:
python -m unittest discover -p Test*.py # run all tests
python -m unittest tests.TestCLI # run only the CLI tests
python -m unittest tests.TestCLI.CLITestCase.test_encoding # run only a single test""")
suite = unittest.defaultTestLoader.discover(
os.getcwd(), pattern='Test*.py')
success = unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
del os.environ['BLEACHBIT_TEST_OPTIONS_DIR']
if os.path.exists(testdir):
shutil.rmtree(testdir)
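# wasSuccessful() is True only if every test passed, so the exit status below
# is 0 on success and 1 on any failure.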
sys.exit(not success)
|
gpl-3.0
|
Mazecreator/tensorflow
|
tensorflow/contrib/layers/python/layers/embedding_ops_test.py
|
86
|
31950
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""embedding_ops tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import sys
import numpy as np
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class SafeEmbeddingLookupSparseTest(test.TestCase):
def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
assert vocab_size > 0
assert embed_dim > 0
assert num_shards > 0
assert num_shards <= vocab_size
embedding_weights = partitioned_variables.create_partitioned_variables(
shape=[vocab_size, embed_dim],
slicing=[num_shards, 1],
initializer=init_ops.truncated_normal_initializer(
mean=0.0, stddev=1.0 / math.sqrt(vocab_size), dtype=dtypes.float32))
for w in embedding_weights:
w.initializer.run()
embedding_weights = [w.eval() for w in embedding_weights]
return embedding_weights
def _ids_and_weights_2d(self):
# Each row demonstrates a test case:
# Row 0: multiple valid ids, 1 invalid id, weighted mean
# Row 1: all ids are invalid (leaving no valid ids after pruning)
# Row 2: no ids to begin with
# Row 3: single id
# Row 4: all ids have <=0 weight
indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [4, 0], [4, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [5, 4]
sparse_ids = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
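# Worked example for the 2-D fixture above: after pruning the invalid id -1,
# row 0 keeps ids [0, 1] with weights [1.0, 2.0], so its weighted mean is
# (1.0 * embedding of id 0 + 2.0 * embedding of id 1) / 3.0 -- exactly what
# test_safe_embedding_lookup_sparse_return_zero_vector asserts below.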
def _ids_and_weights_3d(self):
# Each (2-D) index demonstrates a test case:
# Index 0, 0: multiple valid ids, 1 invalid id, weighted mean
# Index 0, 1: all ids are invalid (leaving no valid ids after pruning)
# Index 0, 2: no ids to begin with
# Index 1, 0: single id
# Index 1, 1: all ids have <=0 weight
# Index 1, 2: no ids to begin with
indices = [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 1, 0], [1, 0, 0], [1, 1, 0],
[1, 1, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [2, 3, 4]
sparse_ids = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
def test_safe_embedding_lookup_sparse_return_zero_vector(self):
with self.test_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, sparse_weights).eval())
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, [0] * 4, [0] * 4, embedding_weights[0][2], [0] * 4])
def test_safe_embedding_lookup_sparse_return_special_vector(self):
with self.test_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, sparse_weights, default_id=3).eval())
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, embedding_weights[0][3], embedding_weights[0][3],
embedding_weights[0][2], embedding_weights[0][3]])
def test_safe_embedding_lookup_sparse_no_weights(self):
with self.test_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, None).eval())
self.assertAllClose(
embedding_lookup_result,
[(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4,
[0] * 4, embedding_weights[0][2], (
embedding_weights[0][0] + embedding_weights[0][1]) / 2.0])
def test_safe_embedding_lookup_sparse_partitioned(self):
with self.test_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, None).eval())
embedding_weights = list(itertools.chain(*embedding_weights))
self.assertAllClose(embedding_lookup_result,
[(embedding_weights[0] + embedding_weights[1]) / 2.0,
[0] * 4, [0] * 4, embedding_weights[2],
(embedding_weights[0] + embedding_weights[1]) / 2.0])
def test_safe_embedding_lookup_sparse_partitioned_inconsistent_weights(self):
with self.test_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_weights[1] = embedding_weights[1].astype(np.float64)
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids)
embedding_weights = [
constant_op.constant(w, dtype=dtypes.float64)
for w in embedding_weights
]
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids, sparse_weights)
def test_safe_embedding_lookup_sparse_3d_return_zero_vector(self):
with self.test_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, sparse_weights).eval())
self.assertAllClose(embedding_lookup_result, [[
(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) / 3.0,
[0] * 4, [0] * 4
], [embedding_weights[0][2], [0] * 4, [0] * 4]])
def test_safe_embedding_lookup_sparse_3d_return_special_vector(self):
with self.test_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, sparse_weights, default_id=3).eval())
self.assertAllClose(
embedding_lookup_result,
[[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, embedding_weights[0][3], embedding_weights[0][3]], [
embedding_weights[0][2], embedding_weights[0][3],
embedding_weights[0][3]
]])
def test_safe_embedding_lookup_sparse_3d_no_weights(self):
with self.test_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_3d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, None).eval())
self.assertAllClose(embedding_lookup_result, [[(
embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4, [
0
] * 4], [
embedding_weights[0][2],
(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4
]])
def test_safe_embedding_lookup_sparse_3d_partitioned(self):
with self.test_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_3d()
embedding_lookup_result = (embedding_ops.safe_embedding_lookup_sparse(
embedding_weights, sparse_ids, None).eval())
embedding_weights = list(itertools.chain(*embedding_weights))
self.assertAllClose(embedding_lookup_result, [[
(embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4, [0] * 4
], [
embedding_weights[2],
(embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4
]])
def test_safe_embedding_lookup_sparse_3d_partitioned_inconsistent_weights(
self):
with self.test_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_weights[1] = embedding_weights[1].astype(np.float64)
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids)
embedding_weights = [
constant_op.constant(w, dtype=dtypes.float64)
for w in embedding_weights
]
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids, sparse_weights)
class ScatteredEmbeddingLookupTest(test.TestCase):
def setUp(self):
random_seed.set_random_seed(1)
def _random_weights(self, size=50, num_shards=1):
assert size > 0
assert num_shards > 0
assert num_shards <= size
embedding_weights = partitioned_variables.create_partitioned_variables(
shape=[size],
slicing=[num_shards],
initializer=init_ops.truncated_normal_initializer(
mean=0.0, stddev=1.0, dtype=dtypes.float32))
for w in embedding_weights:
w.initializer.run()
return embedding_weights
def test_scattered_embedding_consistency(self):
with self.test_session():
embedding_weights = self._random_weights()
values = constant_op.constant(["foo", "foo"])
embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
embedding_weights, values, dimension=10).eval()
self.assertAllEqual(embedding_lookup_result.shape, [2, 10])
self.assertAllEqual(embedding_lookup_result[0],
embedding_lookup_result[1])
def test_scattered_embedding_multiple_partition(self):
with self.test_session():
embedding_weights = self._random_weights(num_shards=7)
values = constant_op.constant([4, 4, 5])
embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
embedding_weights, values, dimension=5).eval()
self.assertAllEqual(embedding_lookup_result.shape, [3, 5])
self.assertAllEqual(embedding_lookup_result[0],
embedding_lookup_result[1])
# Different embedding expected for different value.
embedding_diff = np.min(
(embedding_lookup_result[2] - embedding_lookup_result[0])**2)
self.assertGreater(embedding_diff, 0)
def test_scattered_embedding_coverage(self):
with self.test_session():
size = 8
embedding_weights = self._random_weights(size=size, num_shards=3)
values = constant_op.constant(["foo"])
# Large embedding dimension to cover the full range of weights.
embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
embedding_weights, values, dimension=100).eval()
self.assertEqual(len(np.unique(embedding_lookup_result[0])), size)
def test_scattered_embedding_multi_dimension(self):
with self.test_session():
embedding_weights = self._random_weights()
values = constant_op.constant([["foo", "bar", "bar"],
["bar", "bar", "foo"]])
embedding_lookup_result = embedding_ops.scattered_embedding_lookup(
embedding_weights, values, dimension=10).eval()
self.assertAllEqual(embedding_lookup_result.shape, [2, 3, 10])
self.assertAllEqual(embedding_lookup_result[0][0],
embedding_lookup_result[1][2])
def test_scattered_embedding_lookup_sparse(self):
with self.test_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_tensor = sparse_tensor_lib.SparseTensor(
values=["foo", "bar", "foo", "bar"],
indices=[[0, 0], [1, 0], [1, 1], [3, 0]],
dense_shape=[5, 2])
embedding_lookup_result = (
embedding_ops.scattered_embedding_lookup_sparse(
embedding_weights, sparse_tensor, dimension=5,
combiner="mean").eval())
self.assertAllEqual(embedding_lookup_result.shape, [5, 5])
# Same non-zero embedding for the empty rows filled with a default value.
self.assertAllEqual(embedding_lookup_result[2],
embedding_lookup_result[4])
embedding_norm = np.sum(embedding_lookup_result[2]**2)
self.assertGreater(embedding_norm, 0)
self.assertAllEqual(embedding_lookup_result[1], 0.5 * (
embedding_lookup_result[0] + embedding_lookup_result[3]))
def test_embedding_lookup_unique(self):
d_embed = 5
n_embed = 10
idx_shape = (2, 3, 4)
embeds = np.random.randn(n_embed, d_embed)
idx = np.random.randint(0, n_embed, idx_shape)
with self.test_session():
embedded_np = embeds[idx]
embedded_tf = embedding_ops.embedding_lookup_unique(embeds, idx).eval()
self.assertEqual(embedded_np.shape, embedded_tf.shape)
np.testing.assert_almost_equal(embedded_np, embedded_tf)
def test_embedding_lookup_unique_param3d(self):
embeds = np.random.randn(5, 3, 3)
idx = np.random.randint(0, 5, 10)
idx2d = np.random.randint(0, 5, (10, 2))
with self.test_session():
embedded_np = embeds[idx]
embedded_np2d = embeds[idx2d]
embedded_tf = embedding_ops.embedding_lookup_unique(embeds, idx).eval()
embedded_tf_lst = embedding_ops.embedding_lookup_unique([embeds],
idx).eval()
embedded_tf2d = embedding_ops.embedding_lookup_unique(embeds,
idx2d).eval()
self.assertEqual(embedded_np.shape, embedded_tf.shape)
np.testing.assert_almost_equal(embedded_np, embedded_tf)
self.assertEqual(embedded_np.shape, embedded_tf_lst.shape)
np.testing.assert_almost_equal(embedded_np, embedded_tf_lst)
self.assertEqual(embedded_np2d.shape, embedded_tf2d.shape)
np.testing.assert_almost_equal(embedded_np2d, embedded_tf2d)
class SampledScatteredEmbeddingLookupTest(test.TestCase):
def setUp(self):
random_seed.set_random_seed(1)
self._hash_key = 1
def _random_weights(self, size=50, num_shards=1):
assert size > 0
assert num_shards > 0
assert num_shards <= size
embedding_weights = partitioned_variables.create_partitioned_variables(
shape=[size],
slicing=[num_shards],
initializer=init_ops.truncated_normal_initializer(
mean=0.0, stddev=1.0, dtype=dtypes.float32))
for w in embedding_weights:
w.initializer.run()
return embedding_weights
def test_hashed_embedding_consistency(self):
with self.test_session():
embedding_weights = self._random_weights()
values = constant_op.constant(["foo", "foo"])
# The first three sampled_candidates are equal, so the first three
# embedding weights will be equal.
sampled_candidates = constant_op.constant([[1, 3, 4, 6], [1, 3, 4, 7]])
embedding_lookup_result = ( # pylint: disable=protected-access
embedding_ops._sampled_scattered_embedding_lookup(
embedding_weights,
values,
sampled_candidates=sampled_candidates,
hash_key=self._hash_key).eval())
self.assertAllEqual(embedding_lookup_result.shape, [2, 4])
self.assertAllEqual(embedding_lookup_result[0][:3],
embedding_lookup_result[1][:3])
self.assertNotEqual(embedding_lookup_result[0][3],
embedding_lookup_result[1][3])
def test_hashed_embedding_multi_dimension(self):
with self.test_session():
embedding_weights = self._random_weights()
values = constant_op.constant([["foo", "bar", "bar"],
["bar", "bar", "foo"]])
sampled_candidates = constant_op.constant(
[[[1, 3, 4, 6], [1, 7, 8, 9], [1, 7, 8, 9]],
[[1, 7, 8, 9], [1, 7, 8, 9], [1, 3, 4, 6]]])
embedding_lookup_result = ( # pylint: disable=protected-access
embedding_ops._sampled_scattered_embedding_lookup(
embedding_weights,
values,
sampled_candidates=sampled_candidates,
hash_key=self._hash_key).eval())
self.assertAllEqual(embedding_lookup_result.shape, [2, 3, 4])
self.assertAllEqual(embedding_lookup_result[0][0],
embedding_lookup_result[1][2])
invalid_indices = constant_op.constant([[[1, 3, 4, 6], [1, 7, 8, 9]],
[[1, 7, 8, 9], [1, 7, 8, 9]]])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, (
r"\[The shape of sampled_candidates: \] \[2 2 4\] "
r"\[ does not match the shape of values: \] \[2 3\]")):
# pylint: disable=protected-access
embedding_ops._sampled_scattered_embedding_lookup(
embedding_weights, values,
sampled_candidates=invalid_indices).eval()
class SampledScatteredEmbeddingLookupSparseTest(test.TestCase):
def setUp(self):
random_seed.set_random_seed(1)
self._hash_key = 1
def test_output_shape(self):
"""Verifies the shape of the output tensor."""
with self.test_session():
sp_values = sparse_tensor_lib.SparseTensor(
values=["a", "a", "b", "c", "d", "e", "f"],
indices=[[1, 0], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5]],
dense_shape=[3, 6])
params = constant_op.constant([.1, .2, .3])
result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values, dimension=4, hash_key=self._hash_key)
self.assertEqual(result.eval().shape, (3, 4))
def test_output_values(self):
"""Verifies the values in a trivial case."""
with self.test_session():
sp_values = sparse_tensor_lib.SparseTensor(
values=["a"], indices=[[1, 0]], dense_shape=[3, 1])
params = constant_op.constant([.1, .2, .3])
result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values, dimension=5, hash_key=self._hash_key)
self.assertAllClose(result.eval(), [[0., 0., 0., 0.,
0.], [.3, .2, .2, .3, .1],
[0., 0., 0., 0., 0.]])
def test_output_values_with_sampled_candidates(self):
"""Verifies the values for given sampled_candidates."""
with self.test_session():
sp_values = sparse_tensor_lib.SparseTensor(
values=["a", "a", "b", "c", "d", "e", "f"],
indices=[[1, 0], [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5]],
dense_shape=[3, 6])
params = constant_op.constant([.1, .2, .3])
sampled_candidates = [[1, 0], [2, 1], [3, 2]]
sampled_result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params,
sp_values,
sampled_candidates=constant_op.constant(sampled_candidates),
hash_key=self._hash_key)
full_result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values, dimension=4, hash_key=self._hash_key)
sampled_result_val = sampled_result.eval()
full_result_val = full_result.eval()
self.assertEqual(sampled_result_val.shape, (3, 2))
for i in range(len(sampled_candidates)):
self.assertAllClose(sampled_result_val[i],
full_result_val[i, sampled_candidates[i]])
def test_output_values_with_sign_hash(self):
"""Verifies the values in a trivial case with hash_signs=True."""
with self.test_session():
sp_values = sparse_tensor_lib.SparseTensor(
values=["a"], indices=[[1, 0]], dense_shape=[3, 1])
params = constant_op.constant([.1, .1, .1])
result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params,
sp_values,
dimension=4,
with_sign_hash=True,
hash_key=self._hash_key)
self.assertAllClose(result.eval(), [[0., 0., 0., 0.], [-.1, -.1, -.1, .1],
[0., 0., 0., 0.]])
def test_distributive_property(self):
"""Verifies the distributive property of matrix multiplication."""
with self.test_session():
params = constant_op.constant([.1, .2, .3])
sp_values_a = sparse_tensor_lib.SparseTensor(
values=["a"], indices=[[0, 0]], dense_shape=[3, 1])
sp_values_b = sparse_tensor_lib.SparseTensor(
values=["b"], indices=[[2, 0]], dense_shape=[3, 1])
sp_values_c = sparse_tensor_lib.SparseTensor(
values=["c"], indices=[[2, 0]], dense_shape=[3, 1])
sp_values = sparse_tensor_lib.SparseTensor(
values=["a", "b", "c"],
indices=[[0, 0], [2, 0], [2, 1]],
dense_shape=[3, 2])
result_a = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values_a, dimension=4, hash_key=self._hash_key)
result_b = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values_b, dimension=4, hash_key=self._hash_key)
result_c = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values_c, dimension=4, hash_key=self._hash_key)
result = embedding_ops._sampled_scattered_embedding_lookup_sparse(
params, sp_values, dimension=4, hash_key=self._hash_key)
result_abc = math_ops.add_n([result_a, result_b, result_c])
self.assertAllClose(result.eval(), result_abc.eval())
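# The distributive check above holds because the sampled scattered lookup is
# linear in its inputs: each sparse value contributes a fixed hashed slice of
# `params`, so looking up {a}, {b} and {c} separately and summing matches the
# lookup of the combined SparseTensor row by row.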
def _PName(param_id):
return "p" + str(param_id)
def _EmbeddingParams(num_shards,
vocab_size,
dtype=dtypes.float32,
shape=None,
use_shapeless_placeholder=False):
p = []
params = {}
feed_dict = {}
if not shape:
shape = [10]
for i in range(num_shards):
shard_shape = [vocab_size // num_shards] + shape
if i < vocab_size % num_shards: # Excess goes evenly on the first shards
shard_shape[0] += 1
param_name = _PName(i)
if use_shapeless_placeholder:
param = array_ops.placeholder(dtype, shape=None, name=param_name)
else:
param = constant_op.constant(
1.0, shape=shard_shape, dtype=dtype, name=param_name)
p.append(param)
np_type = "f" if dtype == dtypes.float32 else "d"
val = (np.random.rand(*shard_shape).astype(np_type)) + 1
params[param_name + ":0"] = val
feed_dict[param.name] = val
return p, params, feed_dict
def _EmbeddingResult(params,
id_vals,
num_shards,
vocab_size,
partition_strategy="mod",
weight_vals=None):
if weight_vals is None:
weight_vals = np.copy(id_vals)
weight_vals.fill(1)
values = []
weights = []
weights_squared = []
for ids, wts in zip(id_vals, weight_vals):
value_aggregation = None
weight_aggregation = None
squared_weight_aggregation = None
if isinstance(ids, compat.integral_types):
ids = [ids]
wts = [wts]
for i, weight_value in zip(ids, wts):
if partition_strategy == "mod":
val = np.copy(params[_PName(i % num_shards) + ":0"][
i // num_shards, :]) * weight_value
elif partition_strategy == "div":
ids_per_partition, extras = divmod(vocab_size, num_shards)
threshold = extras * (ids_per_partition + 1)
if i < threshold:
partition = i // (ids_per_partition + 1)
offset = i % (ids_per_partition + 1)
else:
partition = extras + (i - threshold) // ids_per_partition
offset = (i - threshold) % ids_per_partition
val = np.copy(
params[_PName(partition) + ":0"][offset, :]) * weight_value
else:
assert False
if value_aggregation is None:
assert weight_aggregation is None
assert squared_weight_aggregation is None
value_aggregation = val
weight_aggregation = weight_value
squared_weight_aggregation = weight_value * weight_value
else:
assert weight_aggregation is not None
assert squared_weight_aggregation is not None
value_aggregation += val
weight_aggregation += weight_value
squared_weight_aggregation += weight_value * weight_value
values.append(value_aggregation)
weights.append(weight_aggregation)
weights_squared.append(squared_weight_aggregation)
values = np.array(values).astype(np.float32)
weights = np.array(weights).astype(np.float32)
weights_squared = np.array(weights_squared).astype(np.float32)
return values, weights, weights_squared
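# Worked example of the "div" branch above: with vocab_size=13 and
# num_shards=5, divmod gives ids_per_partition=2 and extras=3, so
# threshold = 3 * (2 + 1) = 9. Id 4 (< threshold) maps to partition
# 4 // 3 = 1, offset 4 % 3 = 1; id 10 (>= threshold) maps to partition
# 3 + (10 - 9) // 2 = 3, offset (10 - 9) % 2 = 1.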
class EmbeddingLookupSparseWithDistributedAggregationTest(test.TestCase):
def _RandomIdsAndWeights(self, batch_size, vocab_size):
max_val_per_entry = 6
vals_per_batch_entry = np.random.randint(
1, max_val_per_entry, size=batch_size)
num_vals = np.sum(vals_per_batch_entry)
ids = np.random.randint(vocab_size, size=num_vals)
weights = 1 + np.random.rand(num_vals)
indices = []
for batch_entry, num_val in enumerate(vals_per_batch_entry):
for val_index in range(num_val):
indices.append([batch_entry, val_index])
shape = [batch_size, max_val_per_entry]
sp_ids = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
sp_weights = sparse_tensor_lib.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sp_ids, sp_weights, ids, weights, vals_per_batch_entry
def _GroupByBatchEntry(self, vals, vals_per_batch_entry):
grouped_vals = []
index = 0
for num_val in vals_per_batch_entry:
grouped_vals.append(list(vals[index:(index + num_val)]))
index += num_val
return grouped_vals
def testEmbeddingLookupSparse(self):
vocab_size = 13
batch_size = 10
param_shape = [2, 5]
expected_lookup_result_shape = [None] + param_shape
sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (
self._RandomIdsAndWeights(batch_size, vocab_size))
grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)
grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)
grouped_ignored_weights = self._GroupByBatchEntry(
np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 5], ["sum", "mean", "sqrtn"], [dtypes.float32,
dtypes.float64], [True, False]):
with self.test_session():
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
embedding_sum = \
embedding_ops.embedding_lookup_sparse_with_distributed_aggregation(
p,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
self.assertEqual(embedding_sum.get_shape().as_list(),
expected_lookup_result_shape)
tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)
np_embedding_sum, np_weight_sum, np_weight_sq_sum = _EmbeddingResult(
params,
grouped_ids,
num_shards,
vocab_size,
weight_vals=grouped_ignored_weights
if ignore_weights else grouped_weights)
if combiner == "mean":
np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
if combiner == "sqrtn":
np_embedding_sum /= np.reshape(
np.sqrt(np_weight_sq_sum), (batch_size, 1, 1))
self.assertAllClose(np_embedding_sum, tf_embedding_sum)
def testGradientsEmbeddingLookupSparse(self):
vocab_size = 12
batch_size = 4
param_shape = [2, 3]
sp_ids, sp_weights, _, _, _ = (self._RandomIdsAndWeights(
batch_size, vocab_size))
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 3], ["sum", "mean", "sqrtn"], [dtypes.float32,
dtypes.float64], [True, False]):
with self.test_session():
x, params, _ = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
y = embedding_ops.embedding_lookup_sparse_with_distributed_aggregation(
x,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-5 if dtype == dtypes.float64 else 2e-3)
def testIncompatibleShapes(self):
with self.test_session():
x, _, _ = _EmbeddingParams(1, 10, dtype=dtypes.float32)
sp_ids = sparse_tensor_lib.SparseTensor(
constant_op.constant([[0, 0], [0, 1], [1, 0]], dtypes.int64),
constant_op.constant([0, 1, 2], dtypes.int32),
constant_op.constant([2, 2], dtypes.int64))
sp_weights = sparse_tensor_lib.SparseTensor(
constant_op.constant([[0, 0], [0, 1]], dtypes.int64),
constant_op.constant([12.0, 5.0], dtypes.float32),
constant_op.constant([1, 2], dtypes.int64))
with self.assertRaises(ValueError):
embedding_ops.embedding_lookup_sparse_with_distributed_aggregation(
x, sp_ids, sp_weights, combiner="mean")
if __name__ == "__main__":
test.main()
|
apache-2.0
|
AltSchool/django-allauth
|
allauth/socialaccount/providers/stripe/tests.py
|
7
|
1572
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import StripeProvider
class StripeTests(OAuth2TestsMixin, TestCase):
provider_id = StripeProvider.id
def get_mocked_response(self):
return MockedResponse(200, """{
"id": "acct_sometestid",
"object": "account",
"business_logo": null,
"business_name": null,
"business_url": "example.com",
"charges_enabled": true,
"country": "SE",
"currencies_supported": [
"usd",
"eur",
"sek"
],
"default_currency": "eur",
"details_submitted": true,
"display_name": "Test",
"email": "[email protected]",
"managed": false,
"metadata": {},
"statement_descriptor": "TEST.COM",
"support_phone": "+460123456789",
"timezone": "Europe/Stockholm",
"transfers_enabled": true
}""")
def get_login_response_json(self, with_refresh_token=True):
rt = ''
if with_refresh_token:
rt = ',"refresh_token": "testrf"'
return """{
"uid":"weibo",
"access_token":"testac",
"livemode": false,
"token_type": "bearer",
"stripe_publishable_key": "pk_test_someteskey",
"stripe_user_id": "acct_sometestid",
"scope": "read_write"
%s }""" % rt
|
mit
|
CXQERP/ODOOERP
|
addons/hr_payroll/report/report_payslip.py
|
377
|
1982
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.report import report_sxw
class payslip_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(payslip_report, self).__init__(cr, uid, name, context)
self.localcontext.update({
'get_payslip_lines': self.get_payslip_lines,
})
def get_payslip_lines(self, obj):
payslip_line = self.pool.get('hr.payslip.line')
res = []
ids = [line.id for line in obj if line.appears_on_payslip is True]
if ids:
res = payslip_line.browse(self.cr, self.uid, ids)
return res
class wrapped_report_payslip(osv.AbstractModel):
_name = 'report.hr_payroll.report_payslip'
_inherit = 'report.abstract_report'
_template = 'hr_payroll.report_payslip'
_wrapped_report_class = payslip_report
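# Note (assumption: the standard OpenERP 8 report wiring): report.abstract_report
# attaches the legacy rml_parse parser above to the QWeb template
# 'hr_payroll.report_payslip', which is how the template reaches get_payslip_lines.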
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
potherca/phpDocumentor2
|
docs/.templates/responsive/conf.py
|
22
|
3320
|
import sys, os
project = u'phpDocumentor'
copyright = u'2013, Mike van Riel'
version = '2.1'
release = '2.1.0'
sys.path.append(os.path.abspath('../../.exts'))
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.ifconfig', 'plantuml']
templates_path = ['.']
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = ['.build']
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
html_title = 'phpDocumentor'
#html_favicon = None
html_static_path = ['../../.static']
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'phpDocumentor'
# -- Options for LaTeX output --------------------------------------------------
latex_paper_size = 'a4'
#latex_font_size = '10pt'
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('for-users', 'phpDocumentor.tex', u'phpDocumentor', u'Mike van Riel', 'manual'),
('for-template-builders', 'phpDocumentor-for-template-builders.tex', u'phpDocumentor', u'Mike van Riel', 'manual'),
('for-developers', 'phpDocumentor-for-developers.tex', u'phpDocumentor', u'Mike van Riel', 'manual'),
('for-developers/serialization', 'phpDocumentor-serialization.tex', u'phpDocumentor', u'Mike van Riel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'phpDocumentor', u'phpDocumentor', [u'Mike van Riel'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'phpDocumentor'
epub_author = u'Mike van Riel'
epub_publisher = u'Mike van Riel'
epub_copyright = u'2012, Mike van Riel'
epub_scheme = 'http://www.phpdoc.org'
epub_identifier = 'http://www.phpdoc.org'
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# UML diagramming tool
plantuml = ['java', '-jar', '.exts/plantuml.jar']
plantuml_latex_output_format = 'pdf'
|
mit
|
Time-Green/-tg-station-yog-rebase
|
tools/mapmerge/map_helpers.py
|
29
|
18534
|
import sys
try:
version = sys.version_info
if version.major < 3 or (version.major == 3 and version.minor < 5):
print("ERROR: You are running an incompatible version of Python. The current minimum version required is [3.5].\nYour version: {}".format(sys.version))
sys.exit()
except:
print("ERROR: Something went wrong, you might be running an incompatible version of Python. The current minimum version required is [3.5].\nYour version: {}".format(sys.version))
sys.exit()
import collections
error = {0:"OK", 1:"WARNING: Key lengths are different, all the lines change."}
maxx = 0
maxy = 0
key_length = 1
def reset_globals():
global key_length
global maxx
global maxy
key_length = 1
maxx = 0
maxy = 0
def merge_map(newfile, backupfile, tgm):
reset_globals()
shitmap = parse_map(newfile)
originalmap = parse_map(backupfile)
global key_length
if shitmap["key_length"] != originalmap["key_length"]:
if tgm:
write_dictionary_tgm(newfile, shitmap["dictionary"])
write_grid_coord_small(newfile, shitmap["grid"])
return 1
else:
key_length = originalmap["key_length"]
shitDict = shitmap["dictionary"] #key to tile data dictionary
shitGrid = shitmap["grid"] #x,y coords to tiles (keys) dictionary (the map's layout)
originalDict = sort_dictionary(originalmap["dictionary"])
originalGrid = originalmap["grid"]
mergeGrid = dict() #final map layout
known_keys = dict() #mapping known keys to original keys
tempGrid = dict() #saving tiles with newly generated keys for later processing
temp_keys = dict() #mapping known keys to newly generated keys
unused_keys = list(originalDict.keys()) #list with all existing keys that aren't being used
tempDict = collections.OrderedDict() #mapping new keys to new data
originalDict_size = len(originalDict)
for y in range(1,maxy+1):
for x in range(1,maxx+1):
shitKey = shitGrid[x,y]
#if this key was seen before, add it to the pile immediately
if shitKey in known_keys:
mergeGrid[x,y] = known_keys[shitKey]
continue
#if this key was seen before, add it to the pile immediately
if shitKey in temp_keys:
tempGrid[x,y] = temp_keys[shitKey]
continue
shitData = shitDict[shitKey]
originalKey = originalGrid[x,y]
originalData = originalDict[originalKey]
#if new tile data at x,y is the same as original tile data at x,y, add to the pile
if shitData == originalData:
mergeGrid[x,y] = originalKey
known_keys[shitKey] = originalKey
unused_keys.remove(originalKey)
else:
#search for the new tile data in the original dictionary, if a key is found add it to the pile, else generate a new key
newKey = search_key(originalDict, shitData)
if newKey != None:
try:
unused_keys.remove(newKey)
except ValueError: #caused by a duplicate entry
print("NOTICE: Correcting duplicate dictionary entry. ({})".format(shitKey))
mergeGrid[x,y] = newKey
known_keys[shitKey] = newKey
#if data at original x,y no longer exists we reuse the key immediately
elif search_key(shitDict, originalData) == None:
mergeGrid[x,y] = originalKey
originalDict[originalKey] = shitData
unused_keys.remove(originalKey)
known_keys[shitKey] = originalKey
else:
if len(tempDict) == 0:
newKey = generate_new_key(originalDict)
else:
newKey = generate_new_key(tempDict)
tempGrid[x,y] = newKey
temp_keys[shitKey] = newKey
tempDict[newKey] = shitData
sort = 0
#find gaps in the dictionary keys sequence and add the missing keys to be recycled
dict_list = list(originalDict.keys())
for index in range(0, len(dict_list)):
if index + 1 == len(dict_list):
break
key = dict_list[index]
next_key = dict_list[index+1]
difference = key_difference(key, next_key)
if difference > 1:
i = 1
nextnew = key
while i < difference:
nextnew = get_next_key(nextnew)
unused_keys.append(nextnew)
i += 1
sort = 1
#Recycle outdated keys with any new tile data, starting from the bottom of the dictionary
i = 0
for key, value in reversed(tempDict.items()):
recycled_key = key
if len(unused_keys) > 0:
recycled_key = unused_keys.pop()
for coord, gridkey in tempGrid.items():
if gridkey == None:
continue
if gridkey == key:
mergeGrid[coord] = recycled_key
tempGrid[coord] = None
originalDict[recycled_key] = value
#if gaps in the key sequence were found, sort the dictionary for cleanliness
if sort == 1:
originalDict = sort_dictionary(originalDict)
if tgm:
write_dictionary_tgm(newfile, originalDict)
write_grid_coord_small(newfile, mergeGrid)
else:
write_dictionary(newfile, originalDict)
write_grid(newfile, mergeGrid)
return 0
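# Illustrative usage (hypothetical file names): merge a freshly saved map
# against its pre-edit backup, writing tgm-formatted output in place:
#
#     merge_map("station.dmm", "station.dmm.backup", tgm=True)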
#write dictionary in tgm format
def write_dictionary_tgm(filename, dictionary):
with open(filename, "w") as output:
output.write("//MAP CONVERTED BY dmm2tgm.py THIS HEADER COMMENT PREVENTS RECONVERSION, DO NOT REMOVE \n")
for key, list_ in dictionary.items():
output.write("\"{}\" = (\n".format(key))
for thing in list_:
buffer = ""
in_quote_block = False
in_varedit_block = False
for char in thing:
if in_quote_block:
if char == "\"":
in_quote_block = False
buffer = buffer + char
continue
elif char == "\"":
in_quote_block = True
buffer = buffer + char
continue
if not in_varedit_block:
if char == "{":
in_varedit_block = True
buffer = buffer + "{\n\t"
continue
else:
if char == ";":
buffer = buffer + ";\n\t"
continue
elif char == "}":
buffer = buffer + "\n\t}"
in_varedit_block = False
continue
buffer = buffer + char
if list_.index(thing) != len(list_) - 1:
buffer = buffer + ",\n"
output.write(buffer)
output.write(")\n")
#thanks to YotaXP for finding out about this one
def write_grid_coord_small(filename, grid):
with open(filename, "a") as output:
output.write("\n")
for x in range(1, maxx+1):
output.write("({},{},1) = {{\"\n".format(x, 1, 1))
for y in range(1, maxy):
output.write("{}\n".format(grid[x,y]))
output.write("{}\n\"}}\n".format(grid[x,maxy]))
def search_key(dictionary, data):
for key, value in dictionary.items():
if value == data:
return key
return None
def generate_new_key(dictionary):
last_key = next(reversed(dictionary))
return get_next_key(last_key)
def get_next_key(key):
if key == "":
return "".join("a" for _ in range(key_length))
length = len(key)
new_key = ""
carry = 1
for char in key[::-1]:
if carry <= 0:
new_key = new_key + char
continue
if char == 'Z':
new_key = new_key + 'a'
carry += 1
length -= 1
if length <= 0:
return "OVERFLOW"
elif char == 'z':
new_key = new_key + 'A'
else:
new_key = new_key + chr(ord(char) + 1)
if carry > 0:
carry -= 1
return new_key[::-1]
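# Per-position ordering is a..z then A..Z, so for example
# get_next_key("aa") -> "ab", get_next_key("az") -> "aA",
# get_next_key("aZ") -> "ba", and a key of all 'Z's returns "OVERFLOW".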
def sort_dictionary(dictionary):
sorted_dict = collections.OrderedDict()
next_key = get_next_key("")
while len(sorted_dict) < len(dictionary):
try:
sorted_dict[next_key] = dictionary[next_key]
except KeyError:
pass
next_key = get_next_key(next_key)
return sorted_dict
#still does not support more than one z level per file, but should parse any format
def parse_map(map_file):
with open(map_file, "r") as map_input:
characters = map_input.read()
in_quote_block = False
in_key_block = False
in_data_block = False
in_varedit_block = False
after_data_block = False
escaping = False
skip_whitespace = False
dictionary = collections.OrderedDict()
curr_key = ""
curr_datum = ""
curr_data = list()
in_map_block = False
in_coord_block = False
in_map_string = False
iter_x = 0
adjust_y = True
curr_num = ""
reading_coord = "x"
global maxx
global maxy
key_length_local = 0
curr_x = 0
curr_y = 0
curr_z = 1
grid = dict()
for char in characters:
if not in_map_block:
if char == "\n" or char == "\t":
continue
if in_data_block:
if in_varedit_block:
if in_quote_block:
if char == "\\":
curr_datum = curr_datum + char
escaping = True
continue
if escaping:
curr_datum = curr_datum + char
escaping = False
continue
if char == "\"":
curr_datum = curr_datum + char
in_quote_block = False
continue
curr_datum = curr_datum + char
continue
if skip_whitespace and char == " ":
skip_whitespace = False
continue
skip_whitespace = False
if char == "\"":
curr_datum = curr_datum + char
in_quote_block = True
continue
if char == ";":
skip_whitespace = True
curr_datum = curr_datum + char
continue
if char == "}":
curr_datum = curr_datum + char
in_varedit_block = False
continue
curr_datum = curr_datum + char
continue
if char == "{":
curr_datum = curr_datum + char
in_varedit_block = True
continue
if char == ",":
curr_data.append(curr_datum)
curr_datum = ""
continue
if char == ")":
curr_data.append(curr_datum)
dictionary[curr_key] = tuple(curr_data)
curr_data = list()
curr_datum = ""
curr_key = ""
in_data_block = False
after_data_block = True
continue
curr_datum = curr_datum + char
continue
if in_key_block:
if char == "\"":
in_key_block = False
key_length_local = len(curr_key)
else:
curr_key = curr_key + char
continue
#else we're looking for a key block, a data block or the map block
if char == "\"":
in_key_block = True
after_data_block = False
continue
if char == "(":
if after_data_block:
in_map_block = True
in_coord_block = True
after_data_block = False
curr_key = ""
continue
else:
in_data_block = True
after_data_block = False
continue
else:
if in_coord_block:
if char == ",":
if reading_coord == "x":
curr_x = string_to_num(curr_num)
if curr_x > maxx:
maxx = curr_x
iter_x = 0
curr_num = ""
reading_coord = "y"
elif reading_coord == "y":
curr_y = string_to_num(curr_num)
if curr_y > maxy:
maxy = curr_y
curr_num = ""
reading_coord = "z"
else:
pass
continue
if char == ")":
in_coord_block = False
reading_coord = "x"
curr_num = ""
#read z here if needed
continue
curr_num = curr_num + char
continue
if in_map_string:
if char == "\"":
in_map_string = False
adjust_y = True
curr_y -= 1
continue
if char == "\n":
if adjust_y:
adjust_y = False
else:
curr_y += 1
if curr_x > maxx:
maxx = curr_x
if iter_x > 1:
curr_x = 1
iter_x = 0
continue
curr_key = curr_key + char
if len(curr_key) == key_length_local:
iter_x += 1
if iter_x > 1:
curr_x += 1
grid[curr_x, curr_y] = curr_key
curr_key = ""
continue
#else look for coordinate block or a map string
if char == "(":
in_coord_block = True
continue
if char == "\"":
in_map_string = True
continue
if curr_y > maxy:
maxy = curr_y
data = dict()
data["dictionary"] = dictionary
data["grid"] = grid
data["key_length"] = key_length_local
return data
#returns value(keyB) - value(keyA), i.e. how far keyB is ahead of keyA in the base-52 key ordering
def key_difference(keyA, keyB):
if len(keyA) != len(keyB):
return "you fucked up"
Ayek = keyA[::-1]
Byek = keyB[::-1]
result = 0
for i in range(0, len(keyA)):
base = 52**i
A = 26 if Ayek[i].isupper() else 0
B = 26 if Byek[i].isupper() else 0
result += ( (ord(Byek[i].lower()) + B) - (ord(Ayek[i].lower()) + A) ) * base
return result
def string_to_num(s):
try:
return int(s)
except ValueError:
return -1
#writes a tile data dictionary the same way Dreammaker does
def write_dictionary(filename, dictionary):
with open(filename, "w") as output:
for key, value in dictionary.items():
output.write("\"{}\" = ({})\n".format(key, ",".join(value)))
#writes a map grid the same way Dreammaker does
def write_grid(filename, grid):
with open(filename, "a") as output:
output.write("\n")
output.write("(1,1,1) = {\"\n")
for y in range(1, maxy+1):
for x in range(1, maxx+1):
try:
output.write(grid[x,y])
except KeyError:
print("Key error: ({},{})".format(x,y))
output.write("\n")
output.write("\"}")
output.write("\n")
#inflated map grid; unused
def write_grid_coord(filename, grid):
with open(filename, "a") as output:
output.write("\n")
for y in range(1, maxy+1):
for x in range(1, maxx+1):
output.write("({},{},1) = {{\"{}\"}}\n".format(x, y, grid[x,y]))
def key_compare(keyA, keyB): #thanks byond for not respecting ascii
pos = 0
for a in keyA:
pos += 1
count = pos
for b in keyB:
if(count > 1):
count -= 1
continue
if a.islower() and b.islower():
if(a < b):
return -1
if(a > b):
return 1
break
if a.islower() and b.isupper():
return -1
if a.isupper() and b.islower():
return 1
if a.isupper() and b.isupper():
if(a < b):
return -1
if(a > b):
return 1
break
return 0
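# key_compare is a cmp-style comparator for the same a..z-then-A..Z ordering;
# if needed it can be handed to sorted() via the standard library, e.g.
#
#     import functools
#     sorted(keys, key=functools.cmp_to_key(key_compare))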
|
agpl-3.0
|