repo_name | path | copies | size | content | license
---|---|---|---|---|---|
ColOfAbRiX/ansible | lib/ansible/plugins/filter/json_query.py | 93 | 1583 | # (c) 2015, Filipe Niero Felisbino <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
try:
import jmespath
HAS_LIB = True
except ImportError:
HAS_LIB = False
def json_query(data, expr):
    '''Query data using the jmespath query language ( http://jmespath.org ). Example:
    - debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
'''
if not HAS_LIB:
raise AnsibleError('You need to install "jmespath" prior to running '
'json_query filter')
return jmespath.search(expr, data)
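# Illustrative usage sketch (not part of the original plugin; assumes the
# jmespath library is installed). Given nested registered data, the filter
# extracts matching values, e.g.:
#
#   data = {'tagged_instances': [
#       {'block_device_mapping': {'/dev/sda1': {'volume_id': 'vol-1'},
#                                 '/dev/sdb1': {'volume_id': 'vol-2'}}}]}
#   json_query(data, 'tagged_instances[*].block_device_mapping.*.volume_id')
#   # -> [['vol-1', 'vol-2']]  (one inner list per instance)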
class FilterModule(object):
''' Query filter '''
def filters(self):
return {
'json_query': json_query
}
| gpl-3.0 |
IPMITMO/statan | coala-bears/bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py | 3 | 7854 | import functools
from itertools import combinations
from bears.c_languages.ClangBear import clang_available, ClangBear
from bears.c_languages.codeclone_detection.ClangCountingConditions import (
condition_dict)
from bears.c_languages.codeclone_detection.ClangCountVectorCreator import (
ClangCountVectorCreator)
from bears.c_languages.codeclone_detection.CloneDetectionRoutines import (
compare_functions, get_count_matrices)
from coala_utils.string_processing.StringConverter import StringConverter
from coalib.bears.GlobalBear import GlobalBear
from dependency_management.requirements.PipRequirement import PipRequirement
from coalib.collecting.Collectors import collect_dirs
from coalib.results.HiddenResult import HiddenResult
from coalib.settings.Setting import path_list, typed_ordered_dict
# counting_condition_dict is a function object generated by typed_ordered_dict.
# It takes a setting and creates a dictionary out of it, converting all keys
# to counting condition function objects (via condition_dict) and all values
# to floats; unset values default to 1.
counting_condition_dict = typed_ordered_dict(
lambda setting: condition_dict[str(setting).lower()],
float,
1)
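# Illustrative sketch (not in the original source): feeding a setting string
# through counting_condition_dict yields a mapping from condition function
# objects to float weights, e.g.:
#
#   cc = counting_condition_dict(StringConverter('used: 0.5, is_assignee'))
#   # cc maps condition_dict['used'] -> 0.5 and
#   # condition_dict['is_assignee'] -> 1.0 (unset values default to 1).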
default_cc_dict = counting_condition_dict(StringConverter(
"""
used: 0,
returned: 1.4,
is_condition: 0,
in_condition: 1.4,
in_second_level_condition: 1.4,
in_third_level_condition: 1.0,
is_assignee: 0,
is_assigner: 0.6,
loop_content: 0,
second_level_loop_content,
third_level_loop_content,
is_param: 2,
is_called: 1.4,
is_call_param: 0.0,
in_sum: 2.0,
in_product: 0,
in_binary_operation,
member_accessed"""))
def get_difference(function_pair,
count_matrices,
average_calculation,
poly_postprocessing,
exp_postprocessing):
"""
Retrieves the difference between two functions using the munkres algorithm.
:param function_pair: A tuple containing both indices for the
count_matrices dictionary.
:param count_matrices: A dictionary holding CMs.
:param average_calculation: If set to true the difference calculation
function will take the average of all variable
differences as the difference, else it will
normalize the function as a whole and thus
weighting in variables dependent on their size.
:param poly_postprocessing: If set to true, the difference value of big
function pairs will be reduced using a
polynomial approach.
:param exp_postprocessing: If set to true, the difference value of big
function pairs will be reduced using an
exponential approach.
:return: A tuple containing both function ids and their
difference.
"""
function_1, function_2 = function_pair
return (function_1,
function_2,
compare_functions(count_matrices[function_1],
count_matrices[function_2],
average_calculation,
poly_postprocessing,
exp_postprocessing))
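# Illustrative call sketch (hypothetical function ids, not from the original
# source): the difference of a single pair could be computed as:
#
#   f1, f2, diff = get_difference(('a.c:foo', 'a.c:bar'),
#                                 count_matrices,
#                                 average_calculation=False,
#                                 poly_postprocessing=True,
#                                 exp_postprocessing=False)
#   # diff is the pair's difference value; lower means more similar.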
class ClangFunctionDifferenceBear(GlobalBear):
check_prerequisites = classmethod(clang_available)
LANGUAGES = ClangBear.LANGUAGES
REQUIREMENTS = ClangBear.REQUIREMENTS | {PipRequirement('munkres3', '1.0')}
def run(self,
counting_conditions: counting_condition_dict=default_cc_dict,
average_calculation: bool=False,
poly_postprocessing: bool=True,
exp_postprocessing: bool=False,
extra_include_paths: path_list=()):
"""
Retrieves similarities for code clone detection. Those can be reused in
another bear to produce results.
Postprocessing may be done because small functions are less likely to
be clones at the same difference value than big functions which may
provide a better refactoring opportunity for the user.
        :param counting_conditions: A comma-separated list of counting
conditions. Possible values are: used,
returned, is_condition, in_condition,
in_second_level_condition,
in_third_level_condition, is_assignee,
is_assigner, loop_content,
second_level_loop_content,
third_level_loop_content, is_param,
in_sum, in_product, in_binary_operation,
member_accessed.
                                    Weightings can be assigned to each
                                    condition by providing a dict value,
                                    e.g. weighting "used" half as much as
                                    the other conditions would simply be:
                                    "used: 0.5, is_assignee". Weightings
                                    default to 1 if unset.
:param average_calculation: If set to true the difference calculation
function will take the average of all
variable differences as the difference,
else it will normalize the function as a
whole and thus weighting in variables
dependent on their size.
:param poly_postprocessing: If set to true, the difference value of big
function pairs will be reduced using a
polynomial approach.
:param extra_include_paths: A list containing additional include paths.
:param exp_postprocessing: If set to true, the difference value of big
function pairs will be reduced using an
exponential approach.
"""
self.debug('Using the following counting conditions:')
for key, val in counting_conditions.items():
self.debug(' *', key.__name__, '(weighting: {})'.format(val))
self.debug('Creating count matrices...')
count_matrices = get_count_matrices(
ClangCountVectorCreator(list(counting_conditions.keys()),
list(counting_conditions.values())),
list(self.file_dict.keys()),
lambda prog: self.debug('{:2.4f}%...'.format(prog)),
self.section['files'].origin,
collect_dirs(extra_include_paths))
self.debug('Calculating differences...')
differences = []
function_count = len(count_matrices)
        # That's n over 2, hardcoded to simplify calculation
combination_length = function_count * (function_count-1) / 2
partial_get_difference = functools.partial(
get_difference,
count_matrices=count_matrices,
average_calculation=average_calculation,
poly_postprocessing=poly_postprocessing,
exp_postprocessing=exp_postprocessing)
for i, elem in enumerate(
map(partial_get_difference,
[(f1, f2) for f1, f2 in combinations(count_matrices, 2)])):
if i % 50 == 0:
self.debug('{:2.4f}%...'.format(100*i/combination_length))
differences.append(elem)
yield HiddenResult(self, differences)
yield HiddenResult(self, count_matrices)
| mit |
chiluf/visvis.dev | backends/backend_fltk.py | 5 | 9139 | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
""" The FLTK backend.
"""
import visvis
from visvis import BaseFigure, events, constants
import fltk
KEYMAP = { fltk.FL_SHIFT: constants.KEY_SHIFT,
fltk.FL_Shift_L: constants.KEY_SHIFT,
fltk.FL_Shift_R: constants.KEY_SHIFT,
fltk.FL_ALT : constants.KEY_ALT,
fltk.FL_Alt_L : constants.KEY_ALT,
fltk.FL_Alt_R : constants.KEY_ALT,
fltk.FL_Control_L: constants.KEY_CONTROL,
fltk.FL_Control_R: constants.KEY_CONTROL,
fltk.FL_Left: constants.KEY_LEFT,
fltk.FL_Up: constants.KEY_UP,
fltk.FL_Right: constants.KEY_RIGHT,
fltk.FL_Down: constants.KEY_DOWN,
fltk.FL_Page_Up: constants.KEY_PAGEUP,
fltk.FL_Page_Down: constants.KEY_PAGEDOWN,
fltk.FL_Delete: constants.KEY_DELETE
}
# Map uppercase letter keycodes to their lowercase equivalents (ASCII offset
# of 32). The upper bound is ord('Z') + 1 so that 'Z' itself is included.
for i in range(ord('A'), ord('Z') + 1):
    KEYMAP[i] = i + 32
def modifiers():
"""Convert the fltk modifier state into a tuple of active modifier keys."""
mod = ()
fltkmod = fltk.Fl.event_state()
if fltk.FL_SHIFT & fltkmod:
mod += constants.KEY_SHIFT,
if fltk.FL_CTRL & fltkmod:
mod += constants.KEY_CONTROL,
if fltk.FL_ALT & fltkmod:
mod += constants.KEY_ALT,
return mod
class GLWidget(fltk.Fl_Gl_Window):
""" Implementation of the GL_window, which passes a number of
events to the Figure object that wraps it.
"""
def __init__(self, figure, *args, **kwargs):
fltk.Fl_Gl_Window.__init__(self, *args, **kwargs)
self.figure = figure
# Callback when closed
self.callback(self.OnClose)
def handle(self, event):
""" All events come in here. """
if not self.figure:
return 1
# map fltk buttons to visvis buttons
buttons = [0,1,0,2,0,0,0]
if event == fltk.FL_PUSH:
x, y = fltk.Fl.event_x(), fltk.Fl.event_y()
but = buttons[fltk.Fl.event_button()]
self.figure._GenerateMouseEvent('down', x, y, but, modifiers())
elif event == fltk.FL_RELEASE:
x, y = fltk.Fl.event_x(), fltk.Fl.event_y()
but = buttons[fltk.Fl.event_button()]
if fltk.Fl.event_clicks() == 1:
# double click
self.figure._GenerateMouseEvent('double', x, y, but, modifiers())
else:
# normal release
self.figure._GenerateMouseEvent('up', x, y, but, modifiers())
elif event in [fltk.FL_MOVE, fltk.FL_DRAG]:
w,h = self.w(), self.h()
self.OnMotion(None)
elif event == fltk.FL_ENTER:
ev = self.figure.eventEnter
ev.Set(0,0,0)
ev.Fire()
elif event == fltk.FL_LEAVE:
ev = self.figure.eventLeave
ev.Set(0,0,0)
ev.Fire()
elif event == fltk.FL_KEYDOWN:
self.OnKeyDown(None)
elif event == fltk.FL_KEYUP:
self.OnKeyUp(None)
elif event == fltk.FL_CLOSE:
self.OnClose(None)
elif event == fltk.FL_FOCUS:
self.OnFocus(None)
else:
return 1 # maybe someone else knows what to do with it
return 1 # event was handled.
def resize(self, x, y, w, h):
# Overload resize function to also draw after resizing
if self.figure:
fltk.Fl_Gl_Window.resize(self, x, y, w, h)
self.figure._OnResize()
def draw(self):
# Do the draw commands now
self.figure.OnDraw()
def OnMotion(self, event):
# prepare and fire event
x, y = fltk.Fl.event_x(), fltk.Fl.event_y()
self.figure._GenerateMouseEvent('motion', x, y, 0, modifiers())
def OnKeyDown(self, event):
key, text = self._ProcessKey()
ev = self.figure.eventKeyDown
ev.Set(key, text, modifiers())
ev.Fire()
def OnKeyUp(self, event):
key, text = self._ProcessKey()
ev = self.figure.eventKeyUp
ev.Set(key, text, modifiers())
ev.Fire()
def _ProcessKey(self):
""" evaluates the keycode of fltk, and transform to visvis key.
Also produce text version.
return key, text. """
key = fltk.Fl.event_key()
if isinstance(key,basestring):
key = ord(key)
# special cases for shift control and alt -> map to 17 18 19
if key in KEYMAP:
return KEYMAP[key], ''
else:
# other key, try producing text
#print key, self._shiftDown
if (97 <= key <= 122) and fltk.Fl.event_shift():
key -= 32
try:
return key, chr(key)
except ValueError:
return key, ''
def OnClose(self, event=None):
if self.figure:
self.figure.Destroy()
self.hide()
def OnFocus(self, event):
BaseFigure._currentNr = self.figure.nr
class Figure(BaseFigure):
""" This is the fltk implementation of the figure class.
A Figure represents the OpenGl context and is the root
of the visualization tree; a Figure Wibject does not have a parent.
A Figure can be created with the function vv.figure() or vv.gcf().
"""
def __init__(self, *args, **kwargs):
# Make sure there is a native app and the timer is started
# (also when embedded)
app.Create()
# create widget
self._widget = GLWidget(self, *args, **kwargs)
# call original init AFTER we created the widget
BaseFigure.__init__(self)
def _SetCurrent(self):
""" make this scene the current context """
if self._widget:
self._widget.make_current()
def _SwapBuffers(self):
""" Swap the memory and screen buffer such that
what we rendered appears on the screen """
self._widget.swap_buffers()
def _SetTitle(self, title):
""" Set the title of the figure... """
window = self._widget
if hasattr(window,'label'):
window.label(title)
def _SetPosition(self, x, y, w, h):
""" Set the position of the widget. """
# select widget to resize. If it
widget = self._widget
# apply
widget.position(x,y)
widget.size(w, h)
def _GetPosition(self):
""" Get the position of the widget. """
# select widget to resize. If it
widget = self._widget
# get and return
return widget.x(), widget.y(), widget.w(), widget.h()
def _RedrawGui(self):
if self._widget:
self._widget.redraw()
def _ProcessGuiEvents(self):
fltk.Fl.wait(0)
def _Close(self, widget=None):
if widget is None:
widget = self._widget
if widget:
widget.OnClose()
def newFigure():
""" Create a window with a figure widget.
"""
# Create figure
size = visvis.settings.figureSize
figure = Figure(size[0], size[1], "Figure")
figure._widget.size_range(100,100,0,0,0,0)
figure._widget.show() # Show AFTER canvas is added
# Make OpenGl Initialize and return
# Also call draw(), otherwise it will not really draw and crash on Linux
figure.DrawNow()
figure._widget.draw()
return figure
class VisvisEventsTimer:
""" Timer that can be started and stopped.
"""
def __init__(self):
self._running = False
def Start(self):
if not self._running:
self._running = True
self._PostOneShot()
def Stop(self):
self._running = False
def _PostOneShot(self):
fltk.Fl.add_timeout(0.01, self._Fire)
def _Fire(self):
if self._running:
events.processVisvisEvents()
self._PostOneShot() # Repost
class App(events.App):
""" App()
Application class to wrap the GUI applications in a class
with a simple interface that is the same for all backends.
This is the fltk implementation.
"""
def __init__(self):
# Init timer
self._timer = VisvisEventsTimer()
def _GetNativeApp(self):
self._timer.Start()
return fltk.Fl
def _ProcessEvents(self):
app = self._GetNativeApp()
app.wait(0)
def _Run(self):
app = self._GetNativeApp()
if hasattr(app, '_in_event_loop') and app._in_event_loop:
pass # Already in event loop
else:
app.run()
# Create application instance now
app = App()
| bsd-3-clause |
havatv/QGIS | python/plugins/processing/algs/qgis/RasterLayerHistogram.py | 15 | 4036 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RasterLayerHistogram.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
import warnings
from qgis.core import (QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterNumber,
QgsProcessingParameterFileDestination,
QgsProcessingException)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.tools import raster
from qgis.PyQt.QtCore import QCoreApplication
class RasterLayerHistogram(QgisAlgorithm):
INPUT = 'INPUT'
BINS = 'BINS'
OUTPUT = 'OUTPUT'
BAND = 'BAND'
def group(self):
return self.tr('Plots')
def groupId(self):
return 'plots'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
self.tr('Input layer')))
self.addParameter(QgsProcessingParameterBand(self.BAND,
self.tr('Band number'),
1,
self.INPUT))
self.addParameter(QgsProcessingParameterNumber(self.BINS,
self.tr('number of bins'), minValue=2, defaultValue=10))
self.addParameter(QgsProcessingParameterFileDestination(self.OUTPUT, self.tr('Histogram'), self.tr('HTML files (*.html)')))
def name(self):
return 'rasterlayerhistogram'
def displayName(self):
return self.tr('Raster layer histogram')
def processAlgorithm(self, parameters, context, feedback):
try:
# importing plotly throws Python warnings from within the library - filter these out
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=ResourceWarning)
warnings.filterwarnings("ignore", category=ImportWarning)
import plotly as plt
import plotly.graph_objs as go
except ImportError:
raise QgsProcessingException(QCoreApplication.translate('RasterLayerHistogram', 'This algorithm requires the Python “plotly” library. Please install this library and try again.'))
layer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
band = self.parameterAsInt(parameters, self.BAND, context)
nbins = self.parameterAsInt(parameters, self.BINS, context)
output = self.parameterAsFileOutput(parameters, self.OUTPUT, context)
# ALERT: this is potentially blocking if the layer is too big
values = raster.scanraster(layer, feedback, band)
valueslist = []
for v in values:
if v is not None:
valueslist.append(v)
data = [go.Histogram(x=valueslist,
nbinsx=nbins)]
plt.offline.plot(data, filename=output, auto_open=False)
return {self.OUTPUT: output}
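# Illustrative usage sketch (assumption: the algorithm is registered under the
# provider id "qgis:" used by QGIS core Python algorithms):
#
#   import processing
#   processing.run('qgis:rasterlayerhistogram',
#                  {'INPUT': 'dem.tif', 'BAND': 1, 'BINS': 10,
#                   'OUTPUT': '/tmp/histogram.html'})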
| gpl-2.0 |
40223202/test | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/event.py | 603 | 19086 | #!/usr/bin/env python
'''Pygame module for interacting with events and queues.
Pygame handles all its event messaging through an event queue. The routines
in this module help you manage that event queue. The input queue is heavily
dependent on the pygame display module. If the display has not been
initialized and a video mode not set, the event queue will not really work.
The queue is a regular queue of Event objects, and there are a variety of
ways to access the events it contains: from simply checking for the
existence of events, to grabbing them directly off the stack.
All events have a type identifier. This event type is in between the values
of NOEVENT and NUMEVENTS. All user defined events can have the value of
USEREVENT or higher. It is recommended make sure your event id's follow this
system.
To get the state of various input devices, you can forego the event queue
and access the input devices directly with their appropriate modules; mouse,
key, and joystick. If you use this method, remember that pygame requires some
form of communication with the system window manager and other parts of the
platform. To keep pygame in sync with the system, you will need to call
pygame.event.pump() to keep everything current. You'll want to call this
function usually once per game loop.
The event queue offers some simple filtering. This can help performance
slightly by blocking certain event types from the queue, use the
pygame.event.set_allowed() and pygame.event.set_blocked() to work with
this filtering. All events default to allowed.
Joysticks will not send any events until the device has been initialized.
An Event object contains an event type and a readonly set of member data.
The Event object contains no method functions, just member data. Event
objects are retrieved from the pygame event queue. You can create your
own new events with the pygame.event.Event() function.
Your program must take steps to keep the event queue from overflowing. If the
program is not clearing or getting all events off the queue at regular
intervals, it can overflow. When the queue overflows an exception is thrown.
All Event objects contain an event type identifier in the Event.type member.
You may also get full access to the Event's member data through the Event.dict
method. All other member lookups will be passed through to the Event's
dictionary values.
While debugging and experimenting, you can print the Event objects for a
quick display of its type and members. Events that come from the system
will have a guaranteed set of member items based on the type. Here is a
list of the Event members that are defined with each type.
QUIT
(none)
ACTIVEEVENT
gain, state
KEYDOWN
unicode, key, mod
KEYUP
key, mod
MOUSEMOTION
pos, rel, buttons
MOUSEBUTTONUP
pos, button
MOUSEBUTTONDOWN
pos, button
JOYAXISMOTION
joy, axis, value
JOYBALLMOTION
joy, ball, rel
JOYHATMOTION
joy, hat, value
JOYBUTTONUP
joy, button
JOYBUTTONDOWN
joy, button
VIDEORESIZE
size, w, h
VIDEOEXPOSE
(none)
USEREVENT
code
'''
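# Illustrative event-loop sketch (not part of this module): a typical consumer
# drains the queue once per frame, e.g.:
#
#   import pygame
#   for ev in pygame.event.get():
#       if ev.type == pygame.QUIT:
#           running = False
#       elif ev.type == pygame.KEYDOWN:
#           print(ev.key, ev.mod)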
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from copy import copy
#from ctypes import * #brython
from SDL import *
import pygame.base
import pygame.locals
import pygame.display
def pump():
'''Internally process pygame event handlers.
For each frame of your game, you will need to make some sort of call to
the event queue. This ensures your program can internally interact with
the rest of the operating system. If you are not using other event
functions in your game, you should call pygame.event.pump() to allow
pygame to handle internal actions.
This function is not necessary if your program is consistently processing
events on the queue through the other pygame.event functions.
There are important things that must be dealt with internally in the event
queue. The main window may need to be repainted or respond to the system.
If you fail to make a call to the event queue for too long, the system may
decide your program has locked up.
'''
pygame.display._video_init_check()
SDL_PumpEvents()
def get(typelist=None):
'''Get events from the queue.
pygame.event.get(): return Eventlist
pygame.event.get(type): return Eventlist
pygame.event.get(typelist): return Eventlist
This will get all the messages and remove them from the queue. If a type
or sequence of types is given only those messages will be removed from the
queue.
If you are only taking specific events from the queue, be aware that the
queue could eventually fill up with the events you are not interested.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types that can be returned.
:rtype: list of `Event`
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = int(typelist)
SDL_PumpEvents()
events = []
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
while new_events:
events.append(Event(0, sdl_event=new_events[0]))
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
return events
def poll():
'''Get a single event from the queue.
Returns a single event from the queue. If the event queue is empty an event
of type pygame.NOEVENT will be returned immediately. The returned event is
removed from the queue.
:rtype: Event
'''
pygame.display._video_init_check()
event = SDL_PollEventAndReturn()
if event:
return Event(0, sdl_event=event, keep_userdata=True)
else:
return Event(pygame.locals.NOEVENT)
def wait():
'''Wait for a single event from the queue.
Returns a single event from the queue. If the queue is empty this function
will wait until one is created. While the program is waiting it will sleep
in an idle state. This is important for programs that want to share the
system with other applications.
:rtype: Event
'''
pygame.display._video_init_check()
return Event(0, sdl_event=SDL_WaitEventAndReturn())
def peek(typelist=None):
'''Test if event types are waiting on the queue.
Returns true if there are any events of the given type waiting on the
queue. If a sequence of event types is passed, this will return True if
any of those events are on the queue.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types to look for.
:rtype: bool
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = SDL_EVENTMASK(int(typelist))
SDL_PumpEvents()
events = SDL_PeepEvents(1, SDL_PEEKEVENT, mask)
if typelist is None:
if events:
return Event(0, sdl_event=events[0], keep_userdata=True)
else:
return Event(pygame.locals.NOEVENT) # XXX deviation from pygame
return len(events) > 0
def clear(typelist=None):
'''Remove all events from the queue.
Remove all events or events of a specific type from the queue. This has the
same effect as `get` except nothing is returned. This can be slightly more
efficient when clearing a full event queue.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types to remove.
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = int(typelist)
SDL_PumpEvents()
events = []
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
while new_events:
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
_event_names = {
SDL_ACTIVEEVENT: 'ActiveEvent',
SDL_KEYDOWN: 'KeyDown',
SDL_KEYUP: 'KeyUp',
SDL_MOUSEMOTION: 'MouseMotion',
SDL_MOUSEBUTTONDOWN:'MouseButtonDown',
SDL_MOUSEBUTTONUP: 'MouseButtonUp',
SDL_JOYAXISMOTION: 'JoyAxisMotion',
SDL_JOYBALLMOTION: 'JoyBallMotion',
SDL_JOYHATMOTION: 'JoyHatMotion',
SDL_JOYBUTTONUP: 'JoyButtonUp',
SDL_JOYBUTTONDOWN: 'JoyButtonDown',
SDL_QUIT: 'Quit',
SDL_SYSWMEVENT: 'SysWMEvent',
SDL_VIDEORESIZE: 'VideoResize',
SDL_VIDEOEXPOSE: 'VideoExpose',
SDL_NOEVENT: 'NoEvent'
}
def event_name(event_type):
'''Get the string name from an event id.
Pygame uses integer ids to represent the event types. If you want to
report these types to the user they should be converted to strings. This
will return the simple name for an event type. The string is in the
CamelCase style.
:Parameters:
- `event_type`: int
:rtype: str
'''
if event_type >= SDL_USEREVENT and event_type < SDL_NUMEVENTS:
return 'UserEvent'
return _event_names.get(event_type, 'Unknown')
def set_blocked(typelist):
'''Control which events are allowed on the queue.
The given event types are not allowed to appear on the event queue. By
default all events can be placed on the queue. It is safe to disable an
event type multiple times.
If None is passed as the argument, this has the opposite effect and none of
the event types are allowed to be placed on the queue.
:note: events posted with `post` will not be blocked.
:Parameters:
`typelist` : int or sequence of int or None
Event type or list of event types to disallow.
'''
pygame.display._video_init_check()
if typelist is None:
SDL_EventState(SDL_ALLEVENTS, SDL_IGNORE)
elif hasattr(typelist, '__len__'):
for val in typelist:
SDL_EventState(val, SDL_IGNORE)
else:
SDL_EventState(typelist, SDL_IGNORE)
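# Illustrative sketch (not in the original source): a program that never uses
# mouse motion can drop those events at the source, e.g.:
#
#   set_blocked(SDL_MOUSEMOTION)            # block a single event type
#   set_blocked([SDL_KEYUP, SDL_KEYDOWN])   # or a sequence of types
#   set_blocked(None)                       # note: None blocks ALL types here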
def set_allowed(typelist):
'''Control which events are allowed on the queue.
The given event types are allowed to appear on the event queue. By default
all events can be placed on the queue. It is safe to enable an event type
multiple times.
If None is passed as the argument, this has the opposite effect and all of
the event types are allowed to be placed on the queue.
:Parameters:
`typelist` : int or sequence of int or None
Event type or list of event types to disallow.
'''
pygame.display._video_init_check()
if typelist is None:
SDL_EventState(SDL_ALLEVENTS, SDL_ENABLE)
elif hasattr(typelist, '__len__'):
for val in typelist:
SDL_EventState(val, SDL_ENABLE)
else:
SDL_EventState(typelist, SDL_ENABLE)
def get_blocked(typelist):
'''Test if a type of event is blocked from the queue.
Returns true if the given event type is blocked from the queue.
:Parameters:
- `event_type`: int
:rtype: int
'''
pygame.display._video_init_check()
if typelist == None:
return SDL_EventState(SDL_ALLEVENTS, SDL_QUERY) == SDL_ENABLE
elif hasattr(typelist, '__len__'): # XXX undocumented behaviour
for val in typelist:
if SDL_EventState(val, SDL_QUERY) == SDL_ENABLE:
return True
return False
else:
return SDL_EventState(typelist, SDL_QUERY) == SDL_ENABLE
def set_grab(grab):
'''Control the sharing of input devices with other applications.
When your program runs in a windowed environment, it will share the mouse
and keyboard devices with other applications that have focus. If your
program sets the event grab to True, it will lock all input into your
program.
It is best to not always grab the input, since it prevents the user from
doing other things on their system.
:Parameters:
- `grab`: bool
'''
pygame.display._video_init_check()
if grab:
SDL_WM_GrabInput(SDL_GRAB_ON)
else:
SDL_WM_GrabInput(SDL_GRAB_OFF)
def get_grab():
'''Test if the program is sharing input devices.
Returns true when the input events are grabbed for this application. Use
`set_grab` to control this state.
:rtype: bool
'''
pygame.display._video_init_check()
return SDL_WM_GrabInput(SDL_GRAB_QUERY) == SDL_GRAB_ON
_USEROBJECT_CHECK1 = int(0xdeadbeef) # signed
_USEROBJECT_CHECK2 = 0xfeedf00d
_user_event_objects = {}
_user_event_nextid = 1
def post(event):
'''Place a new event on the queue.
This places a new event at the end of the event queue. These Events will
later be retrieved from the other queue functions.
This is usually used for placing pygame.USEREVENT events on the queue.
    Although any type of event can be placed, if using the system event types
your program should be sure to create the standard attributes with
appropriate values.
:Parameters:
`event` : Event
Event to add to the queue.
'''
global _user_event_nextid
pygame.display._video_init_check()
sdl_event = SDL_Event(event.type)
sdl_event.user.code = _USEROBJECT_CHECK1
sdl_event.user.data1 = c_void_p(_USEROBJECT_CHECK2)
sdl_event.user.data2 = c_void_p(_user_event_nextid)
_user_event_objects[_user_event_nextid] = event
_user_event_nextid += 1
SDL_PushEvent(sdl_event)
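# Illustrative sketch (assumption: pygame.locals defines USEREVENT): posting
# and retrieving a custom event through the _user_event_objects store above:
#
#   ev = Event(pygame.locals.USEREVENT, code=42, payload='hello')
#   post(ev)
#   # A later poll()/get() returns an Event whose attributes (code, payload)
#   # are restored from _user_event_objects rather than from raw SDL data.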
class Event:
def __init__(self, event_type, event_dict=None, sdl_event=None,
keep_userdata=False, **attributes):
'''Create a new event object.
Creates a new event with the given type. The event is created with the
given attributes and values. The attributes can come from a dictionary
argument, or as string keys from a dictionary.
The given attributes will be readonly attributes on the new event
object itself. These are the only attributes on the Event object,
there are no methods attached to Event objects.
:Parameters:
`event_type` : int
Event type to create
`event_dict` : dict
Dictionary of attributes to assign.
`sdl_event` : `SDL_Event`
Construct a Pygame event from the given SDL_Event; used
internally.
`keep_userdata` : bool
Used internally.
`attributes` : additional keyword arguments
Additional attributes to assign to the event.
'''
if sdl_event:
uevent = cast(pointer(sdl_event), POINTER(SDL_UserEvent)).contents
if uevent.code == _USEROBJECT_CHECK1 and \
uevent.data1 == _USEROBJECT_CHECK2 and \
uevent.data2 in _user_event_objects:
# An event that was posted; grab dict from local store.
id = sdl_event.data2
for key, value in _user_event_objects[id].__dict__.items():
setattr(self, key, value)
# Free memory unless just peeking
if not keep_userdata:
del _user_event_objects[id]
else:
# Standard SDL event
self.type = sdl_event.type
if self.type == SDL_QUIT:
pass
elif self.type == SDL_ACTIVEEVENT:
self.gain = sdl_event.gain
self.state = sdl_event.state
elif self.type == SDL_KEYDOWN:
self.unicode = sdl_event.keysym.unicode
self.key = sdl_event.keysym.sym
self.mod = sdl_event.keysym.mod
elif self.type == SDL_KEYUP:
self.key = sdl_event.keysym.sym
self.mod = sdl_event.keysym.mod
elif self.type == SDL_MOUSEMOTION:
self.pos = (sdl_event.x, sdl_event.y)
self.rel = (sdl_event.xrel, sdl_event.yrel)
self.buttons = (sdl_event.state & SDL_BUTTON(1) != 0,
sdl_event.state & SDL_BUTTON(2) != 0,
sdl_event.state & SDL_BUTTON(3) != 0)
elif self.type in (SDL_MOUSEBUTTONDOWN, SDL_MOUSEBUTTONUP):
self.pos = (sdl_event.x, sdl_event.y)
self.button = sdl_event.button
elif self.type == SDL_JOYAXISMOTION:
self.joy = sdl_event.which
self.axis = sdl_event.axis
self.value = sdl_event.value / 32767.0
elif self.type == SDL_JOYBALLMOTION:
self.joy = sdl_event.which
self.ball = sdl_event.ball
self.rel = (sdl_event.xrel, sdl_event.yrel)
elif self.type == SDL_JOYHATMOTION:
self.joy = sdl_event.which
self.hat = sdl_event.hat
hx = hy = 0
if sdl_event.value & SDL_HAT_UP:
hy = 1
if sdl_event.value & SDL_HAT_DOWN:
hy = -1
if sdl_event.value & SDL_HAT_RIGHT:
hx = 1
if sdl_event.value & SDL_HAT_LEFT:
hx = -1
self.value = (hx, hy)
elif self.type in (SDL_JOYBUTTONUP, SDL_JOYBUTTONDOWN):
self.joy = sdl_event.which
self.button = sdl_event.button
elif self.type == SDL_VIDEORESIZE:
self.size = (sdl_event.w, sdl_event.h)
self.w = sdl_event.w
self.h = sdl_event.h
elif self.type == SDL_VIDEOEXPOSE:
pass
elif self.type == SDL_SYSWMEVENT:
pass ### XXX: not implemented
elif self.type >= SDL_USEREVENT and self.type < SDL_NUMEVENTS:
self.code = sdl_event.code
else:
# Create an event (not from event queue)
self.type = event_type
if event_dict:
for key, value in event_dict.items():
setattr(self, key, value)
for key, value in attributes.items():
setattr(self, key, value)
        # Bizarre, undocumented, but used by some people.
self.dict = self.__dict__
def __repr__(self):
d = copy(self.__dict__)
del d['type']
return '<Event(%d-%s %r)>' % \
(self.type, event_name(self.type), d)
def __nonzero__(self):
return self.type != SDL_NOEVENT
EventType = Event
| agpl-3.0 |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/parallel/controller/dependency.py | 2 | 7155 | """Dependency utilities
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from types import ModuleType
from IPython.parallel.client.asyncresult import AsyncResult
from IPython.parallel.error import UnmetDependency
from IPython.parallel.util import interactive
from IPython.utils import py3compat
from IPython.utils.pickleutil import can, uncan
class depend(object):
"""Dependency decorator, for use with tasks.
`@depend` lets you define a function for engine dependencies
just like you use `apply` for tasks.
Examples
--------
::
@depend(df, a,b, c=5)
        def f(m,n,p):
view.apply(f, 1,2,3)
will call df(a,b,c=5) on the engine, and if it returns False or
raises an UnmetDependency error, then the task will not be run
and another engine will be tried.
"""
def __init__(self, f, *args, **kwargs):
self.f = f
self.args = args
self.kwargs = kwargs
def __call__(self, f):
return dependent(f, self.f, *self.args, **self.kwargs)
class dependent(object):
"""A function that depends on another function.
This is an object to prevent the closure used
in traditional decorators, which are not picklable.
"""
def __init__(self, f, df, *dargs, **dkwargs):
self.f = f
self.func_name = getattr(f, '__name__', 'f')
self.df = df
self.dargs = dargs
self.dkwargs = dkwargs
def check_dependency(self):
if self.df(*self.dargs, **self.dkwargs) is False:
raise UnmetDependency()
def __call__(self, *args, **kwargs):
return self.f(*args, **kwargs)
if not py3compat.PY3:
@property
def __name__(self):
return self.func_name
@interactive
def _require(*modules, **mapping):
"""Helper for @require decorator."""
from IPython.parallel.error import UnmetDependency
from IPython.utils.pickleutil import uncan
user_ns = globals()
for name in modules:
try:
exec 'import %s' % name in user_ns
except ImportError:
raise UnmetDependency(name)
for name, cobj in mapping.items():
user_ns[name] = uncan(cobj, user_ns)
return True
def require(*objects, **mapping):
"""Simple decorator for requiring local objects and modules to be available
when the decorated function is called on the engine.
Modules specified by name or passed directly will be imported
prior to calling the decorated function.
Objects other than modules will be pushed as a part of the task.
Functions can be passed positionally,
and will be pushed to the engine with their __name__.
Other objects can be passed by keyword arg.
Examples
--------
In [1]: @require('numpy')
...: def norm(a):
...: return numpy.linalg.norm(a,2)
In [2]: foo = lambda x: x*x
In [3]: @require(foo)
...: def bar(a):
...: return foo(1-a)
"""
names = []
for obj in objects:
if isinstance(obj, ModuleType):
obj = obj.__name__
if isinstance(obj, basestring):
names.append(obj)
elif hasattr(obj, '__name__'):
mapping[obj.__name__] = obj
else:
raise TypeError("Objects other than modules and functions "
"must be passed by kwarg, but got: %s" % type(obj)
)
for name, obj in mapping.items():
mapping[name] = can(obj)
return depend(_require, *names, **mapping)
class Dependency(set):
"""An object for representing a set of msg_id dependencies.
Subclassed from set().
Parameters
----------
dependencies: list/set of msg_ids or AsyncResult objects or output of Dependency.as_dict()
The msg_ids to depend on
all : bool [default True]
Whether the dependency should be considered met when *all* depending tasks have completed
or only when *any* have been completed.
success : bool [default True]
Whether to consider successes as fulfilling dependencies.
failure : bool [default False]
Whether to consider failures as fulfilling dependencies.
If `all=success=True` and `failure=False`, then the task will fail with an ImpossibleDependency
as soon as the first depended-upon task fails.
"""
all=True
success=True
failure=True
def __init__(self, dependencies=[], all=True, success=True, failure=False):
if isinstance(dependencies, dict):
# load from dict
all = dependencies.get('all', True)
success = dependencies.get('success', success)
failure = dependencies.get('failure', failure)
dependencies = dependencies.get('dependencies', [])
ids = []
# extract ids from various sources:
if isinstance(dependencies, (basestring, AsyncResult)):
dependencies = [dependencies]
for d in dependencies:
if isinstance(d, basestring):
ids.append(d)
elif isinstance(d, AsyncResult):
ids.extend(d.msg_ids)
else:
raise TypeError("invalid dependency type: %r"%type(d))
set.__init__(self, ids)
self.all = all
if not (success or failure):
raise ValueError("Must depend on at least one of successes or failures!")
self.success=success
self.failure = failure
def check(self, completed, failed=None):
"""check whether our dependencies have been met."""
if len(self) == 0:
return True
against = set()
if self.success:
against = completed
if failed is not None and self.failure:
against = against.union(failed)
if self.all:
return self.issubset(against)
else:
return not self.isdisjoint(against)
def unreachable(self, completed, failed=None):
"""return whether this dependency has become impossible."""
if len(self) == 0:
return False
against = set()
if not self.success:
against = completed
if failed is not None and not self.failure:
against = against.union(failed)
if self.all:
return not self.isdisjoint(against)
else:
return self.issubset(against)
def as_dict(self):
"""Represent this dependency as a dict. For json compatibility."""
return dict(
dependencies=list(self),
all=self.all,
success=self.success,
failure=self.failure
)
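# Illustrative sketch (not in the original source): constructing and checking
# a dependency against sets of completed/failed msg_ids:
#
#   dep = Dependency(['msg-1', 'msg-2'], all=True, success=True, failure=False)
#   dep.check(completed={'msg-1', 'msg-2'})             # -> True (all met)
#   dep.unreachable(completed=set(), failed={'msg-1'})  # -> True (impossible)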
__all__ = ['depend', 'require', 'dependent', 'Dependency']
| apache-2.0 |
jeremiahyan/odoo | odoo/addons/base/tests/test_res_users.py | 4 | 4686 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase, Form, tagged
class TestUsers(TransactionCase):
def test_name_search(self):
""" Check name_search on user. """
User = self.env['res.users']
test_user = User.create({'name': 'Flad the Impaler', 'login': 'vlad'})
like_user = User.create({'name': 'Wlad the Impaler', 'login': 'vladi'})
other_user = User.create({'name': 'Nothing similar', 'login': 'nothing similar'})
all_users = test_user | like_user | other_user
res = User.name_search('vlad', operator='ilike')
self.assertEqual(User.browse(i[0] for i in res) & all_users, test_user)
res = User.name_search('vlad', operator='not ilike')
self.assertEqual(User.browse(i[0] for i in res) & all_users, all_users)
res = User.name_search('', operator='ilike')
self.assertEqual(User.browse(i[0] for i in res) & all_users, all_users)
res = User.name_search('', operator='not ilike')
self.assertEqual(User.browse(i[0] for i in res) & all_users, User)
res = User.name_search('lad', operator='ilike')
self.assertEqual(User.browse(i[0] for i in res) & all_users, test_user | like_user)
res = User.name_search('lad', operator='not ilike')
self.assertEqual(User.browse(i[0] for i in res) & all_users, other_user)
def test_user_partner(self):
""" Check that the user partner is well created """
User = self.env['res.users']
Partner = self.env['res.partner']
Company = self.env['res.company']
company_1 = Company.create({'name': 'company_1'})
company_2 = Company.create({'name': 'company_2'})
partner = Partner.create({
'name': 'Bob Partner',
'company_id': company_2.id
})
# case 1 : the user has no partner
test_user = User.create({
'name': 'John Smith',
'login': 'jsmith',
'company_ids': [company_1.id],
'company_id': company_1.id
})
self.assertFalse(
test_user.partner_id.company_id,
"The partner_id linked to a user should be created without any company_id")
# case 2 : the user has a partner
test_user = User.create({
'name': 'Bob Smith',
'login': 'bsmith',
'company_ids': [company_1.id],
'company_id': company_1.id,
'partner_id': partner.id
})
self.assertEqual(
test_user.partner_id.company_id,
company_1,
"If the partner_id of a user has already a company, it is replaced by the user company"
)
def test_change_user_company(self):
""" Check the partner company update when the user company is changed """
User = self.env['res.users']
Company = self.env['res.company']
test_user = User.create({'name': 'John Smith', 'login': 'jsmith'})
company_1 = Company.create({'name': 'company_1'})
company_2 = Company.create({'name': 'company_2'})
test_user.company_ids += company_1
test_user.company_ids += company_2
# 1: the partner has no company_id, no modification
test_user.write({
'company_id': company_1.id
})
self.assertFalse(
test_user.partner_id.company_id,
"On user company change, if its partner_id has no company_id,"
"the company_id of the partner_id shall NOT be updated")
# 2: the partner has a company_id different from the new one, update it
test_user.partner_id.write({
'company_id': company_1.id
})
test_user.write({
'company_id': company_2.id
})
self.assertEqual(
test_user.partner_id.company_id,
company_2,
"On user company change, if its partner_id has already a company_id,"
"the company_id of the partner_id shall be updated"
)
@tagged('post_install', '-at_install')
class TestUsers2(TransactionCase):
def test_reified_groups(self):
""" The groups handler doesn't use the "real" view with pseudo-fields
during installation, so it always works (because it uses the normal
groups_id field).
"""
# use the specific views which has the pseudo-fields
f = Form(self.env['res.users'], view='base.view_users_form')
f.name = "bob"
f.login = "bob"
user = f.save()
self.assertIn(self.env.ref('base.group_user'), user.groups_id)
| gpl-3.0 |
klyone/PacketTesterXML | PTXML_server.py | 1 | 1324 | #! /usr/bin/python
# ******************************************************************************
# @file PTXML_server.py
# @brief Example using PTXMLServer
#
# Copyright (C) 2014
#
# @author Miguel Jimenez Lopez <[email protected]>
#
# ******************************************************************************
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
import sys
import PacketTesterXML
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Usage: PTXML_server.py <XML server configuration file>"
sys.exit(-1)
file_xml = sys.argv[1]
server = PacketTesterXML.PTXMLServer(file_xml)
server.run(1)
| lgpl-3.0 |
shikhar394/uptane | tests/test_der_encoder.py | 1 | 21951 | """
<Program Name>
test_der_encoder.py
<Purpose>
Unit testing for DER Encoding, uptane/encoding/asn1_codec.py
"""
from __future__ import print_function
from __future__ import unicode_literals
import uptane # Import before TUF modules; may change tuf.conf values.
import tuf.formats
import tuf.keys
import tuf.repository_tool as repo_tool
import uptane.formats
import uptane.common
import uptane.encoding.asn1_codec as asn1_codec
import uptane.encoding.timeserver_asn1_coder as timeserver_asn1_coder
import uptane.encoding.ecu_manifest_asn1_coder as ecu_manifest_asn1_coder
import uptane.encoding.asn1_definitions as asn1_spec
import pyasn1.codec.der.encoder as p_der_encoder
import pyasn1.codec.der.decoder as p_der_decoder
from pyasn1.type import tag, univ
import sys # to test Python version 2 vs 3, for byte string behavior
import hashlib
import unittest
import os
import time
import copy
import shutil
# For temporary convenience
import demo # for generate_key, import_public_key, import_private_key
TEST_DATA_DIR = os.path.join(uptane.WORKING_DIR, 'tests', 'test_data')
TEMP_TEST_DIR = os.path.join(TEST_DATA_DIR, 'temp_test_encoding')
test_signing_key = None
def destroy_temp_dir():
# Clean up anything that may currently exist in the temp test directory.
if os.path.exists(TEMP_TEST_DIR):
shutil.rmtree(TEMP_TEST_DIR)
def setUpModule():
"""
This is run once for the full module, before all tests.
It prepares some globals, including a single Primary ECU client instance.
When finished, it will also start up an OEM Repository Server,
Director Server, and Time Server. Currently, it requires them to be already
running.
"""
global primary_ecu_key
destroy_temp_dir()
private_key_fname = os.path.join(
os.getcwd(), 'demo', 'keys', 'director')
global test_signing_key
test_signing_key = repo_tool.import_ed25519_privatekey_from_file(
private_key_fname, 'pw')
def tearDownModule():
"""This is run once for the full module, after all tests."""
destroy_temp_dir()
class TestASN1(unittest.TestCase):
"""
"unittest"-style test class for the Primary module in the reference
implementation
Please note that these tests are NOT entirely independent of each other.
Several of them build on the results of previous tests. This is an unusual
pattern but saves code and works at least for now.
"""
def test_01_encode_token(self):
# Test the ASN.1 data definitions in asn1_spec.
# Begin with a very simple object, which simply encodes an integer.
# Create ASN.1 object for a token (nonce-like int provided by a Secondary
# in order to validate recency of the time returned).
t = asn1_spec.Token(42)
# Encode as DER
t_der = p_der_encoder.encode(t)
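    # For reference, the DER encoding of INTEGER 42 is the three bytes
    # b'\x02\x01*': tag 0x02 (INTEGER), length 0x01, value 0x2a ('*').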
# Decode back into ASN1
# decode() returns a 2-tuple. The first element is the decoded portion.
# The second element (which should be empty) is any remainder after the
# decoding.
(decoded, remainder) = p_der_decoder.decode(
t_der, asn1_spec.Token())
self.assertFalse(remainder)
self.assertEqual(t, decoded)
def test_02_encode_tokens(self):
# Continue to test the ASN.1 data definitions in asn1_spec.
# This will be an array of tokens that each encode an integer.
# We create a Tokens object with the subtype definition required by the
# classes that contain Tokens objects.
tokens = asn1_spec.Tokens().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
# Add a first token to the list.
t = asn1_spec.Token(42)
    tokens.setComponentByPosition(0, t, False)  # the third argument is pyasn1's verifyConstraints flag
# tokens should look like:
# Tokens(tagSet=TagSet((), Tag(tagClass=128, tagFormat=32, tagId=1))).setComponents(Token(42))
tokens_der = p_der_encoder.encode(tokens)
# tokens_der should look like: '\xa1\x03\x02\x01*'
(tokens_again, remainder) = p_der_decoder.decode(
tokens_der,
asn1_spec.Tokens().subtype(implicitTag=tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 1)))
self.assertFalse(remainder)
self.assertEqual(tokens, tokens_again)
def test_03_encode_tokensandtimestamp(self):
"""
Using lower level code (than asn1_codec), test translation of a timeserver
attestation from standard Uptane Python dictionary format to an ASN.1
representation, then into a DER encoding, and then back to the original
form.
"""
sample_attestation = {
'nonces': [15856], 'time': '2017-03-01T19:30:45Z'}
uptane.formats.TIMESERVER_ATTESTATION_SCHEMA.check_match(sample_attestation)
asn1_attestation = timeserver_asn1_coder.get_asn_signed(sample_attestation)
der_attestation = p_der_encoder.encode(asn1_attestation)
self.assertTrue(is_valid_nonempty_der(der_attestation))
# Decoding requires that we provide an object with typing that precisely
# matches what we expect to decode. This is such an object.
exemplar_object = asn1_spec.TokensAndTimestamp().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatConstructed,
0))
(asn1_attestation_again, remainder) = p_der_decoder.decode(
der_attestation, asn1Spec=exemplar_object)
self.assertFalse(remainder)
self.assertEqual(asn1_attestation, asn1_attestation_again)
def test_04_encode_full_signable_attestation(self):
"""
Tests the conversion of signable time attestations from Python dictionaries
to ASN.1/DER objects, and back.
Similar to test 03 above, except that it encodes the signable dictionary
(signed and signatures) instead of what is effectively just the 'signed'
portion.
Employing the asn1_codec code instead of using
- the lower level code from pyasn1.der, asn1_spec, and
timeserver_asn1_coder.
- using a signable version of the time attestation (encapsulated in
'signed', with 'signatures')
This test doesn't re-sign the attestation over the hash of the DER encoding.
One of the other tests below does that. The signatures here remain over
the human-readable internal representation of the time attestation.
"""
# Using str() here because in Python2, I'll get u'' if I don't, and the
# self.assertEqual(signable_attestation, attestation_again) will fail
# because they won't both have u' prefixes.
signable_attestation = {
str('signatures'): [
{str('keyid'):
str('79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'),
str('sig'):
str('a5ea6a3b685ad64f96c8c12145beda4efafddfac60bcdb45def35fe43c7d1150a182a1b50a1463bfffb0ef8d30b6203aa8b5365b0b7176312e1e9d7e355e550e'),
str('method'): str('ed25519')}],
str('signed'): {str('nonces'): [1],
str('time'): str('2017-03-08T17:09:56Z')}}
# str('signed'): {str('nonces'): [834845858], str('time'): str('2017-03-01T19:30:45Z')},
# str('signatures'): [{str('keyid'): str('12'), str('method'): str('ed25519'), str('sig'): str('123495')}]}
# u'signed': {u'nonces': [834845858], u'time': u'2017-03-01T19:30:45Z'},
# u'signatures': [{u'keyid': u'12', u'method': u'ed25519', u'sig': u'12345'}]}
uptane.formats.SIGNABLE_TIMESERVER_ATTESTATION_SCHEMA.check_match(
signable_attestation)
# Converts it, without re-signing (default for this method).
der_attestation = asn1_codec.convert_signed_metadata_to_der(
signable_attestation)
self.assertTrue(is_valid_nonempty_der(der_attestation))
attestation_again = asn1_codec.convert_signed_der_to_dersigned_json(
der_attestation)
self.assertEqual(attestation_again, signable_attestation)
def test_05_encode_full_signable_attestation_manual(self):
"""
Similar to test 04 above, but performs much of the work manually.
Useful separately only for debugging.
Instead of just employing asn1_codec, uses lower level code from pyasn1.der,
asn1_spec, and timeserver_asn1_coder.
"""
signable_attestation = {
'signatures': [
{'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e',
'sig': 'a5ea6a3b685ad64f96c8c12145beda4efafddfac60bcdb45def35fe43c7d1150a182a1b50a1463bfffb0ef8d30b6203aa8b5365b0b7176312e1e9d7e355e550e',
'method': 'ed25519'}],
'signed': {'nonces': [1], 'time': '2017-03-08T17:09:56Z'}}
uptane.formats.SIGNABLE_TIMESERVER_ATTESTATION_SCHEMA.check_match(
signable_attestation)
json_signed = signable_attestation['signed']
asn_signed = timeserver_asn1_coder.get_asn_signed(json_signed)
asn_signatures_list = asn1_spec.Signatures().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 2))
i = 0 # Index for iterating through asn signatures
for pydict_sig in signable_attestation['signatures']:
asn_sig = asn1_spec.Signature()
asn_sig['keyid'] = asn1_spec.Keyid().subtype(
explicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatConstructed, 0))
asn_sig['keyid']['octetString'] = univ.OctetString(
hexValue=pydict_sig['keyid']).subtype(implicitTag=tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 1))
asn_sig['method'] = int(asn1_spec.SignatureMethod(
pydict_sig['method']))
asn_sig['value'] = asn1_spec.BinaryData().subtype(
explicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatConstructed, 2))
asn_sig['value']['octetString'] = univ.OctetString(
hexValue=pydict_sig['sig']).subtype(implicitTag=tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 1))
asn_signatures_list[i] = asn_sig # has no append method
i += 1
asn_signable = asn1_spec.TokensAndTimestampSignable()#.subtype(implicitTag=tag.Tag(
#tag.tagClassContext, tag.tagFormatConstructed, 3))
asn_signable['signed'] = asn_signed #considering using der_signed instead - requires changes
asn_signable['signatures'] = asn_signatures_list # TODO: Support multiple sigs, or integrate with TUF.
asn_signable['numberOfSignatures'] = len(asn_signatures_list)
der_attestation = p_der_encoder.encode(asn_signable)
self.assertTrue(is_valid_nonempty_der(der_attestation))
# Decoding requires that we provide an object with typing that precisely
# matches what we expect to decode. This is such an object.
exemplar_object = asn1_spec.TokensAndTimestampSignable()#.subtype(implicitTag=tag.Tag(
#tag.tagClassContext, tag.tagFormatConstructed, 3))
(asn1_attestation_again, remainder) = p_der_decoder.decode(
der_attestation, asn1Spec=exemplar_object)
self.assertFalse(remainder)
self.assertEqual(asn1_attestation_again, asn_signable)
# TODO: Test rest of the way back: ASN1 to Python dictionary.
# (This is in addition to the next test, which does that with the higher
# level code in asn1_codec.)
def test_06_encode_and_validate_resigned_time_attestation(self):
"""
Test timeserver attestation encoding and decoding, with signing over DER
('re-sign' functionality in asn1_codec) and signature validation.
"""
signable_attestation = {
str('signatures'): [
{str('keyid'):
str('79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'),
str('sig'):
str('a5ea6a3b685ad64f96c8c12145beda4efafddfac60bcdb45def35fe43c7d1150a182a1b50a1463bfffb0ef8d30b6203aa8b5365b0b7176312e1e9d7e355e550e'),
str('method'): str('ed25519')}],
str('signed'): {str('nonces'): [1],
str('time'): str('2017-03-08T17:09:56Z')}}
# Load the timeserver's private key to sign a time attestation, and public
# key to verify that signature.
timeserver_key = demo.import_private_key('timeserver')
timeserver_key_pub = demo.import_public_key('timeserver')
tuf.formats.ANYKEY_SCHEMA.check_match(timeserver_key)
tuf.formats.ANYKEY_SCHEMA.check_match(timeserver_key_pub)
# First, calculate what we'll be verifying at the end of this test.
# The re-signing in the previous line produces a signature over the SHA256
# hash of the DER encoding of the ASN.1 format of the 'signed' portion of
# signable_attestation. We produce it here so that we can check it against
# the result of encoding, resigning, and decoding.
der_signed = asn1_codec.convert_signed_metadata_to_der(
signable_attestation, only_signed=True)
der_signed_hash = hashlib.sha256(der_signed).digest()
# Now perform the actual conversion to ASN.1/DER of the full
# signable_attestation, replacing the signature (which was given as
# signatures over the Python 'signed' dictionary) with a signature over
# the hash of the DER encoding of the 'signed' ASN.1 data.
# This is the final product to be distributed back to a Primary client.
der_attestation = asn1_codec.convert_signed_metadata_to_der(
signable_attestation, private_key=timeserver_key, resign=True)
# Now, in order to test the final product, decode it back from DER into
# pyasn1 ASN.1, and convert back into Uptane's standard Python dictionary
# form.
pydict_again = asn1_codec.convert_signed_der_to_dersigned_json(
der_attestation)
# Check the extracted signature against the hash we produced earlier.
self.assertTrue(tuf.keys.verify_signature(
timeserver_key_pub,
pydict_again['signatures'][0],
der_signed_hash))
def test_07_encode_and_validate_resigned_time_attestation_again(self):
"""
This is a redundant test, repeating much of test 06 above with a slightly
different method.
"""
signable_attestation = {
str('signatures'): [
{str('keyid'):
str('79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'),
str('sig'):
str('a5ea6a3b685ad64f96c8c12145beda4efafddfac60bcdb45def35fe43c7d1150a182a1b50a1463bfffb0ef8d30b6203aa8b5365b0b7176312e1e9d7e355e550e'),
str('method'): str('ed25519')}],
str('signed'): {str('nonces'): [1],
str('time'): str('2017-03-08T17:09:56Z')}}
conversion_tester(
signable_attestation, 'time_attestation', self)
def test_10_ecu_manifest_asn1_conversion(self):
# First try the low-level asn1 conversion.
asn_signed = ecu_manifest_asn1_coder.get_asn_signed(
SAMPLE_ECU_MANIFEST_SIGNABLE['signed'])#ecu_manifest_signed_component)
# Convert back to a basic Python dictionary.
json_signed = ecu_manifest_asn1_coder.get_json_signed({'signed': asn_signed})
# Make sure that the result of conversion to ASN.1 and back is the same
# as the original.
self.assertEqual(json_signed, SAMPLE_ECU_MANIFEST_SIGNABLE['signed'])
def test_11_ecu_manifest_der_conversion(self):
conversion_tester(
SAMPLE_ECU_MANIFEST_SIGNABLE, 'ecu_manifest', self)
# Redundant tests. The above call should cover all of the following tests.
der_ecu_manifest = asn1_codec.convert_signed_metadata_to_der(
SAMPLE_ECU_MANIFEST_SIGNABLE, only_signed=True, datatype='ecu_manifest')
der_ecu_manifest = asn1_codec.convert_signed_metadata_to_der(
SAMPLE_ECU_MANIFEST_SIGNABLE, datatype='ecu_manifest')
pydict_ecu_manifest = asn1_codec.convert_signed_der_to_dersigned_json(
der_ecu_manifest, datatype='ecu_manifest')
self.assertEqual(
pydict_ecu_manifest, SAMPLE_ECU_MANIFEST_SIGNABLE)
def test_20_vehicle_manifest_der_conversion(self):
uptane.formats.SIGNABLE_VEHICLE_VERSION_MANIFEST_SCHEMA.check_match(
SAMPLE_VEHICLE_MANIFEST_SIGNABLE)
conversion_tester(
SAMPLE_VEHICLE_MANIFEST_SIGNABLE, 'vehicle_manifest', self)
def conversion_tester(signable_pydict, datatype, cls): # cls: clunky
"""
Tests each of the different kinds of conversions into ASN.1/DER, and tests
converting back. In one type of conversion, compares to make sure the data
has not changed.
This function takes as a third parameter the unittest.TestCase object whose
functions (assertTrue etc) it can use. This is awkward and inappropriate. :P
Find a different means of providing modularity instead of this one.
(Can't just have this method in the class above because it would be run as
a test. Could have default parameters and do that, but that's clunky, too.)
Does unittest allow/test private functions in UnitTest classes?
"""
# Test type 1: only-signed
# Convert and return only the 'signed' portion, the metadata payload itself,
# without including any signatures.
signed_der = asn1_codec.convert_signed_metadata_to_der(
signable_pydict, only_signed=True, datatype=datatype)
cls.assertTrue(is_valid_nonempty_der(signed_der))
# TODO: Add function to asn1_codec that will convert signed-only DER back to
# Python dictionary. Might be useful, and is useful for testing only_signed
# in any case.
# Test type 2: full conversion
# Convert the full signable ('signed' and 'signatures'), maintaining the
# existing signature in a new format and encoding.
signable_der = asn1_codec.convert_signed_metadata_to_der(
signable_pydict, datatype=datatype)
cls.assertTrue(is_valid_nonempty_der(signable_der))
# Convert it back.
signable_reverted = asn1_codec.convert_signed_der_to_dersigned_json(
signable_der, datatype=datatype)
# Ensure the original is equal to what is converted back.
cls.assertEqual(signable_pydict, signable_reverted)
# Test type 3: full conversion with re-signing
# Convert the full signable ('signed' and 'signatures'), but discarding the
# original signatures and re-signing over, instead, the hash of the converted,
# ASN.1/DER 'signed' element.
resigned_der = asn1_codec.convert_signed_metadata_to_der(
signable_pydict, resign=True, private_key=test_signing_key,
datatype=datatype)
cls.assertTrue(is_valid_nonempty_der(resigned_der))
# Convert the re-signed DER manifest back in order to split it up.
resigned_reverted = asn1_codec.convert_signed_der_to_dersigned_json(
resigned_der, datatype=datatype)
resigned_signature = resigned_reverted['signatures'][0]
# Check the signature on the re-signed DER manifest:
cls.assertTrue(uptane.common.verify_signature_over_metadata(
test_signing_key,
resigned_signature,
resigned_reverted['signed'],
datatype=datatype,
metadata_format='der'))
# The signatures will not match, because a new signature was made, but the
# 'signed' elements should match when converted back.
cls.assertEqual(
signable_pydict['signed'], resigned_reverted['signed'])
def is_valid_nonempty_der(der_string):
"""
Currently a hacky test to see if the result is a non-empty byte string.
This CAN raise false failures, stochastically, in Python2. In Python2,
where bytes and str are the same type, we check to see if, anywhere in the
string, there is a character requiring a \\x escape, as would almost
certainly happen in an adequately long DER string of bytes. As a result,
short or very regular strings may raise false failures in Python2.
The best way to really do this test is to decode the DER and see if
believable ASN.1 has come out of it.
"""
if not der_string:
return False
  elif sys.version_info.major < 3:
    return '\\x' in repr(der_string)
else:
return isinstance(der_string, bytes)
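# Illustrative behavior of the heuristic above (sample value hypothetical,
# not taken from the tests in this module):
#   is_valid_nonempty_der(b'') -> False (empty input)
#   is_valid_nonempty_der(b'0\x81\x9f0\x81...') -> True on Python 3, since it
#   is a bytes instance; on Python 2 it is True only if repr() of the string
#   contains at least one \x escape.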
SAMPLE_ECU_MANIFEST_SIGNABLE = {
'signed': {
'timeserver_time': '2017-03-27T16:19:17Z',
'previous_timeserver_time': '2017-03-27T16:19:17Z',
'ecu_serial': '22222',
'attacks_detected': '',
'installed_image': {
'filepath': '/secondary_firmware.txt',
'fileinfo': {
'length': 37,
'hashes': {
'sha256': '6b9f987226610bfed08b824c93bf8b2f59521fce9a2adef80c495f363c1c9c44',
'sha512': '706c283972c5ae69864b199e1cdd9b4b8babc14f5a454d0fd4d3b35396a04ca0b40af731671b74020a738b5108a78deb032332c36d6ae9f31fae2f8a70f7e1ce'}}}},
'signatures': [{
'sig': '42e6f4b398dbad0404cca847786a926972b54ca2ae71c7334f3d87fc62a10d08e01f7b5aa481cd3add61ef36f2037a9f68beca9f6ea26d2f9edc6f4ba0ba2a06',
'method': 'ed25519',
'keyid': '49309f114b857e4b29bfbff1c1c75df59f154fbc45539b2eb30c8a867843b2cb'}]}
SAMPLE_VEHICLE_MANIFEST_SIGNABLE = {
'signed': {
'primary_ecu_serial': '11111',
'vin': '111',
'ecu_version_manifests': {
'22222': [{
'signed': {
'previous_timeserver_time': '2017-03-31T15:48:31Z',
'timeserver_time': '2017-03-31T15:48:31Z',
'ecu_serial': '22222',
'attacks_detected': '',
'installed_image': {
'filepath': '/secondary_firmware.txt',
'fileinfo': {
'length': 37,
'hashes': {
'sha256': '6b9f987226610bfed08b824c93bf8b2f59521fce9a2adef80c495f363c1c9c44',
'sha512': '706c283972c5ae69864b199e1cdd9b4b8babc14f5a454d0fd4d3b35396a04ca0b40af731671b74020a738b5108a78deb032332c36d6ae9f31fae2f8a70f7e1ce'}}}},
'signatures': [{
'keyid': '49309f114b857e4b29bfbff1c1c75df59f154fbc45539b2eb30c8a867843b2cb',
'sig': '40069be1dd6f3fc091300307d61bc1646683a3ab8ebefac855bec0082c6fa067136800b744a2276564d9216cfcaafdea3976fc7f2128d2454d8d46bac79ebe05',
'method': 'ed25519'}]}]}},
'signatures': [{
'keyid': '9a406d99e362e7c93e7acfe1e4d6585221315be817f350c026bbee84ada260da',
'sig': '222e3fe0f3aa4fd14e163ec68f61954a9f4714d6d91d7114190e0a19a5ecc1cc43d9684e99dd8082c519815a01dd2e55a7a63d1467612cfb360937178586530c',
'method': 'ed25519'}]}
# Run unit tests.
if __name__ == '__main__':
unittest.main()
| mit |
Filechaser/nzbToMedia | libs/beetsplug/permissions.py | 7 | 3087 | # -*- coding: utf-8 -*-
"""Fixes file permissions after the file gets written on import. Put something
like the following in your config.yaml to configure:
    permissions:
        file: 644
        dir: 755
"""
from __future__ import division, absolute_import, print_function
import os
from beets import config, util
from beets.plugins import BeetsPlugin
from beets.util import ancestry
def convert_perm(perm):
    """Convert a permission value to an octal int.
    If perm is an int it is first converted to its string form and then
    parsed as base 8; a string is parsed as base 8 directly.
    """
    if isinstance(perm, int):
        return int(str(perm), 8)
    else:
        return int(perm, 8)
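# A minimal sketch of what convert_perm returns (values illustrative; oct()
# output shown in Python 3 notation):
#   >>> oct(convert_perm(644))    # int straight from YAML
#   '0o644'
#   >>> oct(convert_perm('755'))  # string form parses the same way
#   '0o755'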
def check_permissions(path, permission):
"""Checks the permissions of a path.
"""
return oct(os.stat(path).st_mode & 0o777) == oct(permission)
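# For example (illustrative): if a file was chmod'ed to 0o644, then
# check_permissions(path, convert_perm(644)) compares oct(st_mode & 0o777)
# against oct(0o644) and returns True.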
def dirs_in_library(library, item):
"""Creates a list of ancestor directories in the beets library path.
"""
return [ancestor
for ancestor in ancestry(item)
if ancestor.startswith(library)][1:]
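# For example (hypothetical library and item paths):
#   dirs_in_library('/music', '/music/artist/album/track.mp3')
#   -> ['/music/artist', '/music/artist/album']
# The leading [1:] drops the library root itself from the result.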
class Permissions(BeetsPlugin):
def __init__(self):
super(Permissions, self).__init__()
# Adding defaults.
self.config.add({
u'file': 644,
u'dir': 755
})
self.register_listener('item_imported', permissions)
self.register_listener('album_imported', permissions)
def permissions(lib, item=None, album=None):
"""Running the permission fixer.
"""
# Getting the config.
file_perm = config['permissions']['file'].get()
dir_perm = config['permissions']['dir'].get()
# Converts permissions to oct.
file_perm = convert_perm(file_perm)
dir_perm = convert_perm(dir_perm)
# Create chmod_queue.
file_chmod_queue = []
if item:
file_chmod_queue.append(item.path)
elif album:
for album_item in album.items():
file_chmod_queue.append(album_item.path)
# A set of directories to change permissions for.
dir_chmod_queue = set()
for path in file_chmod_queue:
# Changing permissions on the destination file.
os.chmod(util.bytestring_path(path), file_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), file_perm):
message = u'There was a problem setting permission on {}'.format(
path)
print(message)
# Adding directories to the directory chmod queue.
dir_chmod_queue.update(
dirs_in_library(lib.directory,
path))
# Change permissions for the directories.
for path in dir_chmod_queue:
        # Changing permissions on the destination directory.
os.chmod(util.bytestring_path(path), dir_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), dir_perm):
message = u'There was a problem setting permission on {}'.format(
path)
print(message)
| gpl-3.0 |
kiddinn/plaso | tests/test_lib.py | 1 | 6573 | # -*- coding: utf-8 -*-
"""Shared functions and classes for testing."""
import io
import os
import re
import shutil
import tempfile
import unittest
from dfdatetime import time_elements
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver as path_spec_resolver
# The path to top of the Plaso source tree.
PROJECT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# The paths below are all derived from the project path directory.
# They are enumerated explicitly here so that they can be overwritten for
# compatibility with different build systems.
ANALYSIS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'analysis')
ANALYZERS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'analyzers')
CLI_HELPERS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'cli', 'helpers')
CONTAINERS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'containers')
DATA_PATH = os.path.join(PROJECT_PATH, 'data')
OUTPUT_PATH = os.path.join(PROJECT_PATH, 'plaso', 'output')
PARSERS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'parsers')
PREPROCESSORS_PATH = os.path.join(PROJECT_PATH, 'plaso', 'preprocessors')
TEST_DATA_PATH = os.path.join(PROJECT_PATH, 'test_data')
def GetTestFilePath(path_segments):
"""Retrieves the path of a file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Returns:
str: path of the test file.
"""
# Note that we need to pass the individual path segments to os.path.join
# and not a list.
return os.path.join(TEST_DATA_PATH, *path_segments)
def CopyTimestampFromString(time_string):
"""Copies a date and time string to a Plaso timestamp.
Args:
time_string (str): a date and time string formatted as:
"YYYY-MM-DD hh:mm:ss.######[+-]##:##", where # are numeric digits
ranging from 0 to 9 and the seconds fraction can be either 3 or 6
digits. The time of day, seconds fraction and timezone offset are
optional. The default timezone is UTC.
Returns:
int: timestamp which contains the number of microseconds since January 1,
1970, 00:00:00 UTC.
Raises:
ValueError: if the time string is invalid or not supported.
"""
date_time = time_elements.TimeElementsInMicroseconds()
date_time.CopyFromDateTimeString(time_string)
return date_time.GetPlasoTimestamp()
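# Illustrative round trip (input format as documented above; UTC is the
# default timezone):
#   CopyTimestampFromString('2017-03-08 17:09:56')
#   -> 1488992996000000  (microseconds since 1970-01-01 00:00:00 UTC)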
class BaseTestCase(unittest.TestCase):
"""The base test case."""
# Show full diff results, part of TestCase so does not follow our naming
# conventions.
maxDiff = None
def _GetDataFilePath(self, path_segments):
"""Retrieves the path of a file in the data directory.
Args:
path_segments (list[str]): path segments inside the data directory.
Returns:
str: path of the test file.
"""
# Note that we need to pass the individual path segments to os.path.join
# and not a list.
return os.path.join(DATA_PATH, *path_segments)
def _GetTestFileEntry(self, path_segments):
"""Creates a file entry that references a file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Returns:
dfvfs.FileEntry: file entry.
Raises:
SkipTest: if the path inside the test data directory does not exist and
the test should be skipped.
"""
path_spec = self._GetTestFilePathSpec(path_segments)
return path_spec_resolver.Resolver.OpenFileEntry(path_spec)
def _GetTestFilePath(self, path_segments):
"""Retrieves the path of a file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Returns:
str: path of the test file.
"""
# Note that we need to pass the individual path segments to os.path.join
# and not a list.
return os.path.join(TEST_DATA_PATH, *path_segments)
def _GetTestFilePathSpec(self, path_segments):
"""Retrieves the path specification of a file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Returns:
dfvfs.PathSpec: path specification.
Raises:
SkipTest: if the path does not exist and the test should be skipped.
"""
test_file_path = self._GetTestFilePath(path_segments)
self._SkipIfPathNotExists(test_file_path)
return path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
def _SkipIfPathNotExists(self, path):
"""Skips the test if the path does not exist.
Args:
path (str): path of a test file.
Raises:
SkipTest: if the path does not exist and the test should be skipped.
"""
if not os.path.exists(path):
filename = os.path.basename(path)
raise unittest.SkipTest('missing test file: {0:s}'.format(filename))
class ImportCheckTestCase(BaseTestCase):
"""Base class for tests that check modules are imported correctly."""
_FILENAME_REGEXP = re.compile(r'^[^_].*\.py$')
def _AssertFilesImportedInInit(self, path, ignorable_files):
"""Checks that files in path are imported in __init__.py
Args:
path (str): path to directory containing an __init__.py file and other
Python files which should be imported.
ignorable_files (list[str]): names of Python files that don't need to
appear in __init__.py. For example, 'manager.py'.
"""
init_path = '{0:s}/__init__.py'.format(path)
with io.open(init_path, mode='r', encoding='utf-8') as init_file:
init_content = init_file.read()
for file_path in os.listdir(path):
filename = os.path.basename(file_path)
if filename in ignorable_files:
continue
if self._FILENAME_REGEXP.search(filename):
module_name, _, _ = filename.partition('.')
import_expression = re.compile(r' import {0:s}\b'.format(module_name))
self.assertRegex(
init_content, import_expression,
'{0:s} not imported in {1:s}'.format(module_name, init_path))
class TempDirectory(object):
"""Class that implements a temporary directory."""
def __init__(self):
"""Initializes a temporary directory."""
super(TempDirectory, self).__init__()
self.name = ''
def __enter__(self):
"""Make this work with the 'with' statement."""
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, exception_type, value, traceback):
"""Make this work with the 'with' statement."""
shutil.rmtree(self.name, True)
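# Typical usage of TempDirectory in a test (paths are illustrative):
#   with TempDirectory() as temp_directory:
#     temp_file = os.path.join(temp_directory, 'storage.plaso')
#     ...  # the whole directory tree is removed on exiting the block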
| apache-2.0 |
synergeticsedx/deployment-wipro | common/djangoapps/third_party_auth/tests/test_pipeline.py | 77 | 1807 | """Unit tests for third_party_auth/pipeline.py."""
import random
from third_party_auth import pipeline
from third_party_auth.tests import testutil
import unittest
# Allow tests access to protected methods (or module-protected methods) under test.
# pylint: disable=protected-access
class MakeRandomPasswordTest(testutil.TestCase):
"""Tests formation of random placeholder passwords."""
def setUp(self):
super(MakeRandomPasswordTest, self).setUp()
self.seed = 1
def test_default_args(self):
self.assertEqual(pipeline._DEFAULT_RANDOM_PASSWORD_LENGTH, len(pipeline.make_random_password()))
def test_probably_only_uses_charset(self):
        # This is ultimately probabilistic, since we could randomly select a "good" character 100000 consecutive times.
for char in pipeline.make_random_password(length=100000):
self.assertIn(char, pipeline._PASSWORD_CHARSET)
def test_pseudorandomly_picks_chars_from_charset(self):
random_instance = random.Random(self.seed)
expected = ''.join(
random_instance.choice(pipeline._PASSWORD_CHARSET)
for _ in xrange(pipeline._DEFAULT_RANDOM_PASSWORD_LENGTH))
random_instance.seed(self.seed)
self.assertEqual(expected, pipeline.make_random_password(choice_fn=random_instance.choice))
@unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
class ProviderUserStateTestCase(testutil.TestCase):
"""Tests ProviderUserState behavior."""
def test_get_unlink_form_name(self):
google_provider = self.configure_google_provider(enabled=True)
state = pipeline.ProviderUserState(google_provider, object(), None)
self.assertEqual(google_provider.provider_id + '_unlink_form', state.get_unlink_form_name())
| agpl-3.0 |
sander76/home-assistant | tests/components/kira/test_sensor.py | 5 | 1611 | """The tests for Kira sensor platform."""
import unittest
from unittest.mock import MagicMock
from homeassistant.components.kira import sensor as kira
from tests.common import get_test_home_assistant
TEST_CONFIG = {kira.DOMAIN: {"sensors": [{"host": "127.0.0.1", "port": 17324}]}}
DISCOVERY_INFO = {"name": "kira", "device": "kira"}
class TestKiraSensor(unittest.TestCase):
"""Tests the Kira Sensor platform."""
# pylint: disable=invalid-name
DEVICES = []
def add_entities(self, devices):
"""Mock add devices."""
for device in devices:
self.DEVICES.append(device)
def setUp(self):
"""Initialize values for this testcase class."""
self.hass = get_test_home_assistant()
mock_kira = MagicMock()
self.hass.data[kira.DOMAIN] = {kira.CONF_SENSOR: {}}
self.hass.data[kira.DOMAIN][kira.CONF_SENSOR]["kira"] = mock_kira
self.addCleanup(self.hass.stop)
# pylint: disable=protected-access
def test_kira_sensor_callback(self):
"""Ensure Kira sensor properly updates its attributes from callback."""
kira.setup_platform(self.hass, TEST_CONFIG, self.add_entities, DISCOVERY_INFO)
assert len(self.DEVICES) == 1
sensor = self.DEVICES[0]
assert sensor.name == "kira"
sensor.hass = self.hass
codeName = "FAKE_CODE"
deviceName = "FAKE_DEVICE"
codeTuple = (codeName, deviceName)
sensor._update_callback(codeTuple)
assert sensor.state == codeName
assert sensor.extra_state_attributes == {kira.CONF_DEVICE: deviceName}
| apache-2.0 |
dragon788/powerline | powerline/lint/markedjson/events.py | 38 | 2157 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
# Abstract classes.
class Event(object):
def __init__(self, start_mark=None, end_mark=None):
self.start_mark = start_mark
self.end_mark = end_mark
def __repr__(self):
attributes = [
key for key in ['implicit', 'value']
if hasattr(self, key)
]
arguments = ', '.join([
'%s=%r' % (key, getattr(self, key))
for key in attributes
])
return '%s(%s)' % (self.__class__.__name__, arguments)
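# Illustrative reprs produced by Event.__repr__ for the subclasses below:
# ScalarEvent carries both 'implicit' and 'value' attributes, so repr() yields
# something like "ScalarEvent(implicit=(True, False), value='42')", while
# StreamEndEvent carries neither and yields "StreamEndEvent()".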
class NodeEvent(Event):
def __init__(self, start_mark=None, end_mark=None):
self.start_mark = start_mark
self.end_mark = end_mark
class CollectionStartEvent(NodeEvent):
def __init__(self, implicit, start_mark=None, end_mark=None, flow_style=None):
self.tag = None
self.implicit = implicit
self.start_mark = start_mark
self.end_mark = end_mark
self.flow_style = flow_style
class CollectionEndEvent(Event):
pass
# Implementations.
class StreamStartEvent(Event):
def __init__(self, start_mark=None, end_mark=None, encoding=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.encoding = encoding
class StreamEndEvent(Event):
pass
class DocumentStartEvent(Event):
def __init__(self, start_mark=None, end_mark=None, explicit=None, version=None, tags=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.explicit = explicit
self.version = version
self.tags = tags
class DocumentEndEvent(Event):
def __init__(self, start_mark=None, end_mark=None, explicit=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.explicit = explicit
class AliasEvent(NodeEvent):
pass
class ScalarEvent(NodeEvent):
def __init__(self, implicit, value, start_mark=None, end_mark=None, style=None):
self.tag = None
self.implicit = implicit
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
self.style = style
class SequenceStartEvent(CollectionStartEvent):
pass
class SequenceEndEvent(CollectionEndEvent):
pass
class MappingStartEvent(CollectionStartEvent):
pass
class MappingEndEvent(CollectionEndEvent):
pass
| mit |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Klout/User/ShowUser.py | 5 | 3067 | # -*- coding: utf-8 -*-
###############################################################################
#
# ShowUser
# Retrieves information for a specified Klout user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ShowUser(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ShowUser Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ShowUser, self).__init__(temboo_session, '/Library/Klout/User/ShowUser')
def new_input_set(self):
return ShowUserInputSet()
def _make_result_set(self, result, path):
return ShowUserResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ShowUserChoreographyExecution(session, exec_id, path)
class ShowUserInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ShowUser
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Klout.)
"""
super(ShowUserInputSet, self)._set_input('APIKey', value)
def set_KloutID(self, value):
"""
Set the value of the KloutID input for this Choreo. ((required, string) The id for a Klout user to retrieve.)
"""
super(ShowUserInputSet, self)._set_input('KloutID', value)
class ShowUserResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ShowUser Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
    def getJSONFromString(self, json_string):
        return json.loads(json_string)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Klout.)
"""
return self._output.get('Response', None)
class ShowUserChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ShowUserResultSet(response, path)
| gpl-2.0 |
brian-l/django-1.4.10 | django/contrib/gis/management/commands/inspectdb.py | 315 | 1466 | from django.core.management.commands.inspectdb import Command as InspectDBCommand
class Command(InspectDBCommand):
db_module = 'django.contrib.gis.db'
gis_tables = {}
def get_field_type(self, connection, table_name, row):
field_type, field_params, field_notes = super(Command, self).get_field_type(connection, table_name, row)
if field_type == 'GeometryField':
geo_col = row[0]
# Getting a more specific field type and any additional parameters
# from the `get_geometry_type` routine for the spatial backend.
field_type, geo_params = connection.introspection.get_geometry_type(table_name, geo_col)
field_params.update(geo_params)
# Adding the table name and column to the `gis_tables` dictionary, this
# allows us to track which tables need a GeoManager.
if table_name in self.gis_tables:
self.gis_tables[table_name].append(geo_col)
else:
self.gis_tables[table_name] = [geo_col]
return field_type, field_params, field_notes
def get_meta(self, table_name):
meta_lines = super(Command, self).get_meta(table_name)
if table_name in self.gis_tables:
# If the table is a geographic one, then we need make
# GeoManager the default manager for the model.
meta_lines.insert(0, ' objects = models.GeoManager()')
return meta_lines
| bsd-3-clause |
thecodinghub/news-for-good | news/Lib/site-packages/pip/_vendor/requests/packages/chardet/jisfreq.py | 3131 | 47315 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampled from about 20M of text material, including literature and computer
# technology.
#
# Japanese frequency table, applied to both S-JIS and EUC-JP.
# Characters are sorted in order of frequency.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
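# Sanity check of the numbers above (plain arithmetic, shown for clarity):
#   Ideal Distribution Ratio  = 0.92635 / (1 - 0.92635) ~= 12.58
#   Random Distribution Ratio = 512 / (2965 + 62 + 83 + 86 - 512) ~= 0.191
#   and 25% of 12.58 ~= 3.14, which the module approximates as the 3.0 above.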
# Char to FreqOrder table
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
# Everything below is of no interest for detection purposes.
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
| bsd-3-clause |
40423130/2016fallcadp_hw | plugin/render_math/math.py | 283 | 14202 | # -*- coding: utf-8 -*-
"""
Math Render Plugin for Pelican
==============================
This plugin allows your site to render Math. It uses
the MathJax JavaScript engine.
For markdown, the plugin works by creating a Markdown
extension which is used during the markdown compilation
stage. Math therefore gets treated like a "first class
citizen" in Pelican
For reStructuredText, the plugin instructs the rst engine
to output Mathjax for all math.
The mathjax script is by default automatically inserted
into the HTML.
Typogrify Compatibility
-----------------------
This plugin now plays nicely with Typogrify, but it
requires Typogrify version 2.07 or above.
User Settings
-------------
Users are also able to pass a dictionary of settings
in the settings file which will control how the MathJax
library renders things. This could be very useful for
template builders that want to adjust the look and feel of
the math. See README for more details.
"""
import os
import sys
from pelican import signals, generators
try:
from bs4 import BeautifulSoup
except ImportError as e:
BeautifulSoup = None
try:
from . pelican_mathjax_markdown_extension import PelicanMathJaxExtension
except ImportError as e:
PelicanMathJaxExtension = None
def process_settings(pelicanobj):
"""Sets user specified MathJax settings (see README for more details)"""
mathjax_settings = {}
# NOTE TO FUTURE DEVELOPERS: Look at the README and what is happening in
# this function if any additional changes to the mathjax settings need to
# be incorporated. Also, please inline comment what the variables
# will be used for
# Default settings
mathjax_settings['auto_insert'] = True # if set to true, it will insert mathjax script automatically into content without needing to alter the template.
    mathjax_settings['align'] = 'center' # controls alignment of displayed equations (values can be: left, right, center)
mathjax_settings['indent'] = '0em' # if above is not set to 'center', then this setting acts as an indent
mathjax_settings['show_menu'] = 'true' # controls whether to attach mathjax contextual menu
mathjax_settings['process_escapes'] = 'true' # controls whether escapes are processed
    mathjax_settings['latex_preview'] = 'TeX' # controls what user sees while waiting for LaTeX to render
mathjax_settings['color'] = 'inherit' # controls color math is rendered in
mathjax_settings['linebreak_automatic'] = 'false' # Set to false by default for performance reasons (see http://docs.mathjax.org/en/latest/output.html#automatic-line-breaking)
mathjax_settings['tex_extensions'] = '' # latex extensions that can be embedded inside mathjax (see http://docs.mathjax.org/en/latest/tex.html#tex-and-latex-extensions)
mathjax_settings['responsive'] = 'false' # Tries to make displayed math responsive
    mathjax_settings['responsive_break'] = '768' # The break point at which math is responsively aligned (in pixels)
mathjax_settings['mathjax_font'] = 'default' # forces mathjax to use the specified font.
    mathjax_settings['process_summary'] = BeautifulSoup is not None # will fix up summaries if math is cut off. Requires BeautifulSoup
mathjax_settings['force_tls'] = 'false' # will force mathjax to be served by https - if set as False, it will only use https if site is served using https
mathjax_settings['message_style'] = 'normal' # This value controls the verbosity of the messages in the lower left-hand corner. Set it to "none" to eliminate all messages
# Source for MathJax
mathjax_settings['source'] = "'//cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'"
# Get the user specified settings
try:
settings = pelicanobj.settings['MATH_JAX']
except:
settings = None
# If no settings have been specified, then return the defaults
if not isinstance(settings, dict):
return mathjax_settings
# The following mathjax settings can be set via the settings dictionary
for key, value in ((key, settings[key]) for key in settings):
# Iterate over dictionary in a way that is compatible with both version 2
# and 3 of python
if key == 'align':
try:
typeVal = isinstance(value, basestring)
except NameError:
typeVal = isinstance(value, str)
if not typeVal:
continue
if value == 'left' or value == 'right' or value == 'center':
mathjax_settings[key] = value
else:
mathjax_settings[key] = 'center'
if key == 'indent':
mathjax_settings[key] = value
if key == 'show_menu' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'message_style':
mathjax_settings[key] = value if value is not None else 'none'
if key == 'auto_insert' and isinstance(value, bool):
mathjax_settings[key] = value
if key == 'process_escapes' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'latex_preview':
try:
typeVal = isinstance(value, basestring)
except NameError:
typeVal = isinstance(value, str)
if not typeVal:
continue
mathjax_settings[key] = value
if key == 'color':
try:
typeVal = isinstance(value, basestring)
except NameError:
typeVal = isinstance(value, str)
if not typeVal:
continue
mathjax_settings[key] = value
if key == 'linebreak_automatic' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'process_summary' and isinstance(value, bool):
if value and BeautifulSoup is None:
print("BeautifulSoup4 is needed for summaries to be processed by render_math\nPlease install it")
value = False
mathjax_settings[key] = value
if key == 'responsive' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'force_tls' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'responsive_break' and isinstance(value, int):
mathjax_settings[key] = str(value)
if key == 'tex_extensions' and isinstance(value, list):
# filter string values, then add '' to them
            try:
                # list() forces evaluation here so that, on Python 3 (where
                # filter() is lazy), the NameError from `basestring` is raised
                # inside this try block and caught below
                value = list(filter(lambda string: isinstance(string, basestring), value))
            except NameError:
                value = list(filter(lambda string: isinstance(string, str), value))
value = map(lambda string: "'%s'" % string, value)
mathjax_settings[key] = ',' + ','.join(value)
if key == 'mathjax_font':
try:
typeVal = isinstance(value, basestring)
except NameError:
typeVal = isinstance(value, str)
if not typeVal:
continue
value = value.lower()
if value == 'sanserif':
value = 'SansSerif'
elif value == 'fraktur':
value = 'Fraktur'
elif value == 'typewriter':
value = 'Typewriter'
else:
value = 'default'
mathjax_settings[key] = value
return mathjax_settings
def process_summary(article):
"""Ensures summaries are not cut off. Also inserts
mathjax script so that math will be rendered"""
summary = article._get_summary()
summary_parsed = BeautifulSoup(summary, 'html.parser')
math = summary_parsed.find_all(class_='math')
if len(math) > 0:
last_math_text = math[-1].get_text()
if len(last_math_text) > 3 and last_math_text[-3:] == '...':
content_parsed = BeautifulSoup(article._content, 'html.parser')
full_text = content_parsed.find_all(class_='math')[len(math)-1].get_text()
math[-1].string = "%s ..." % full_text
summary = summary_parsed.decode()
article._summary = "%s<script type='text/javascript'>%s</script>" % (summary, process_summary.mathjax_script)
def configure_typogrify(pelicanobj, mathjax_settings):
"""Instructs Typogrify to ignore math tags - which allows Typogrify
to play nicely with math related content"""
# If Typogrify is not being used, then just exit
if not pelicanobj.settings.get('TYPOGRIFY', False):
return
try:
import typogrify
from distutils.version import LooseVersion
if LooseVersion(typogrify.__version__) < LooseVersion('2.0.7'):
raise TypeError('Incorrect version of Typogrify')
from typogrify.filters import typogrify
# At this point, we are happy to use Typogrify, meaning
# it is installed and it is a recent enough version
# that can be used to ignore all math
# Instantiate markdown extension and append it to the current extensions
pelicanobj.settings['TYPOGRIFY_IGNORE_TAGS'].extend(['.math', 'script']) # ignore math class and script
except (ImportError, TypeError) as e:
pelicanobj.settings['TYPOGRIFY'] = False # disable Typogrify
if isinstance(e, ImportError):
print("\nTypogrify is not installed, so it is being ignored.\nIf you want to use it, please install via: pip install typogrify\n")
if isinstance(e, TypeError):
print("\nA more recent version of Typogrify is needed for the render_math module.\nPlease upgrade Typogrify to the latest version (anything equal or above version 2.0.7 is okay).\nTypogrify will be turned off due to this reason.\n")
def process_mathjax_script(mathjax_settings):
"""Load the mathjax script template from file, and render with the settings"""
# Read the mathjax javascript template from file
    with open(os.path.dirname(os.path.realpath(__file__))
+ '/mathjax_script_template', 'r') as mathjax_script_template:
mathjax_template = mathjax_script_template.read()
return mathjax_template.format(**mathjax_settings)
def mathjax_for_markdown(pelicanobj, mathjax_script, mathjax_settings):
"""Instantiates a customized markdown extension for handling mathjax
related content"""
# Create the configuration for the markdown template
config = {}
config['mathjax_script'] = mathjax_script
config['math_tag_class'] = 'math'
config['auto_insert'] = mathjax_settings['auto_insert']
# Instantiate markdown extension and append it to the current extensions
try:
pelicanobj.settings['MD_EXTENSIONS'].append(PelicanMathJaxExtension(config))
except:
sys.excepthook(*sys.exc_info())
sys.stderr.write("\nError - the pelican mathjax markdown extension failed to configure. MathJax is non-functional.\n")
sys.stderr.flush()
def mathjax_for_rst(pelicanobj, mathjax_script):
"""Setup math for RST"""
docutils_settings = pelicanobj.settings.get('DOCUTILS_SETTINGS', {})
docutils_settings['math_output'] = 'MathJax'
pelicanobj.settings['DOCUTILS_SETTINGS'] = docutils_settings
rst_add_mathjax.mathjax_script = mathjax_script
def pelican_init(pelicanobj):
"""
Loads the mathjax script according to the settings.
Instantiate the Python markdown extension, passing in the mathjax
script as config parameter.
"""
# Process settings, and set global var
mathjax_settings = process_settings(pelicanobj)
# Generate mathjax script
mathjax_script = process_mathjax_script(mathjax_settings)
# Configure Typogrify
configure_typogrify(pelicanobj, mathjax_settings)
# Configure Mathjax For Markdown
if PelicanMathJaxExtension:
mathjax_for_markdown(pelicanobj, mathjax_script, mathjax_settings)
# Configure Mathjax For RST
mathjax_for_rst(pelicanobj, mathjax_script)
# Set process_summary's mathjax_script variable
process_summary.mathjax_script = None
if mathjax_settings['process_summary']:
process_summary.mathjax_script = mathjax_script
def rst_add_mathjax(content):
"""Adds mathjax script for reStructuredText"""
# .rst is the only valid extension for reStructuredText files
_, ext = os.path.splitext(os.path.basename(content.source_path))
if ext != '.rst':
return
# If math class is present in text, add the javascript
# note that RST hardwires mathjax to be class "math"
if 'class="math"' in content._content:
content._content += "<script type='text/javascript'>%s</script>" % rst_add_mathjax.mathjax_script
def process_rst_and_summaries(content_generators):
"""
Ensure mathjax script is applied to RST and summaries are
corrected if specified in user settings.
Handles content attached to ArticleGenerator and PageGenerator objects,
since the plugin doesn't know how to handle other Generator types.
For reStructuredText content, examine both articles and pages.
If article or page is reStructuredText and there is math present,
append the mathjax script.
Also process summaries if present (only applies to articles)
and user wants summaries processed (via user settings)
"""
for generator in content_generators:
if isinstance(generator, generators.ArticlesGenerator):
for article in generator.articles + generator.translations:
rst_add_mathjax(article)
                # optionally fix truncated formulae in summaries.
if process_summary.mathjax_script is not None:
process_summary(article)
elif isinstance(generator, generators.PagesGenerator):
for page in generator.pages:
rst_add_mathjax(page)
def register():
"""Plugin registration"""
signals.initialized.connect(pelican_init)
signals.all_generators_finalized.connect(process_rst_and_summaries)
| agpl-3.0 |
mtndesign/mtnvim | myvim/bundle/ropevim/ftplugin/python/libs/rope/refactor/multiproject.py | 91 | 2639 | """This module can be used for performing cross-project refactorings
See the "cross-project refactorings" section of ``docs/library.txt``
file.
"""
from rope.base import resources, project, libutils
class MultiProjectRefactoring(object):
def __init__(self, refactoring, projects, addpath=True):
"""Create a multiproject proxy for the main refactoring
`projects` are other project.
"""
self.refactoring = refactoring
self.projects = projects
self.addpath = addpath
def __call__(self, project, *args, **kwds):
"""Create the refactoring"""
return _MultiRefactoring(self.refactoring, self.projects,
self.addpath, project, *args, **kwds)
class _MultiRefactoring(object):
def __init__(self, refactoring, other_projects, addpath,
project, *args, **kwds):
self.refactoring = refactoring
self.projects = [project] + other_projects
for other_project in other_projects:
for folder in self.project.pycore.get_source_folders():
other_project.get_prefs().add('python_path', folder.real_path)
self.refactorings = []
for other in self.projects:
args, kwds = self._resources_for_args(other, args, kwds)
self.refactorings.append(
self.refactoring(other, *args, **kwds))
def get_all_changes(self, *args, **kwds):
"""Get a project to changes dict"""
result = []
for project, refactoring in zip(self.projects, self.refactorings):
args, kwds = self._resources_for_args(project, args, kwds)
result.append((project, refactoring.get_changes(*args, **kwds)))
return result
def __getattr__(self, name):
return getattr(self.main_refactoring, name)
def _resources_for_args(self, project, args, kwds):
newargs = [self._change_project_resource(project, arg) for arg in args]
newkwds = dict((name, self._change_project_resource(project, value))
for name, value in kwds.items())
return newargs, newkwds
def _change_project_resource(self, project, obj):
if isinstance(obj, resources.Resource) and \
obj.project != project:
return libutils.path_to_resource(project, obj.real_path)
return obj
@property
def project(self):
return self.projects[0]
@property
def main_refactoring(self):
return self.refactorings[0]
def perform(project_changes):
for project, changes in project_changes:
project.do(changes)
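
# A minimal cross-project usage sketch; the project paths, module name and
# offset below are illustrative assumptions, not part of this module:
#
#     from rope.base.project import Project
#     from rope.refactor import multiproject, rename
#
#     main = Project('/path/to/main_project')
#     other = Project('/path/to/other_project')
#     CrossRename = multiproject.MultiProjectRefactoring(rename.Rename, [other])
#     renamer = CrossRename(main, main.root.get_child('mod.py'), 8)
#     perform(renamer.get_all_changes('new_name'))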
| gpl-2.0 |
marcellodesales/svnedge-console | ext/solaris-sparc/pkg-toolkit/pkg/vendor-packages/mako/codegen.py | 6 | 32514 | # codegen.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer [email protected]
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree constructing into module source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters
MAGIC_NUMBER = 4
def compile(node, uri, filename=None, default_filters=None, buffer_filters=None, imports=None, source_encoding=None, generate_unicode=True):
"""generate module source code given a parsetree node, uri, and optional source filename"""
buf = util.FastEncodingBuffer(unicode=generate_unicode)
printer = PythonPrinter(buf)
_GenerateRenderMethod(printer, _CompileContext(uri, filename, default_filters, buffer_filters, imports, source_encoding, generate_unicode), node)
return buf.getvalue()
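
# A sketch of driving this entry point directly; templates normally go through
# mako.template.Template, and the Lexer usage below is an assumption about this
# vintage of Mako rather than a documented public API:
#
#     from mako.lexer import Lexer
#     node = Lexer("hello ${name}").parse()
#     module_source = compile(node, uri='memory:hello')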
class _CompileContext(object):
def __init__(self, uri, filename, default_filters, buffer_filters, imports, source_encoding, generate_unicode):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.source_encoding = source_encoding
self.generate_unicode = generate_unicode
class _GenerateRenderMethod(object):
"""a template visitor object which generates the full module source for a template."""
def __init__(self, printer, compiler, node):
self.printer = printer
self.last_source_line = -1
self.compiler = compiler
self.node = node
self.identifier_stack = [None]
self.in_def = isinstance(node, parsetree.DefTag)
if self.in_def:
name = "render_" + node.name
args = node.function_decl.get_argument_expressions()
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
defs = None
pagetag = None
else:
defs = self.write_toplevel()
pagetag = self.compiler.pagetag
name = "render_body"
if pagetag is not None:
args = pagetag.body_decl.get_argument_expressions()
if not pagetag.body_decl.kwargs:
args += ['**pageargs']
cached = eval(pagetag.attributes.get('cached', 'False'))
else:
args = ['**pageargs']
cached = False
buffered = filtered = False
if args is None:
args = ['context']
else:
args = [a for a in ['context'] + args]
self.write_render_callable(pagetag or node, name, args, buffered, filtered, cached)
if defs is not None:
for node in defs:
_GenerateRenderMethod(printer, compiler, node)
identifiers = property(lambda self:self.identifier_stack[-1])
def write_toplevel(self):
"""traverse a template structure for module-level directives and generate the
start of module-level code."""
inherit = []
namespaces = {}
module_code = []
encoding =[None]
self.compiler.pagetag = None
class FindTopLevel(object):
def visitInheritTag(s, node):
inherit.append(node)
def visitNamespaceTag(s, node):
namespaces[node.name] = node
def visitPageTag(s, node):
self.compiler.pagetag = node
def visitCode(s, node):
if node.ismodule:
module_code.append(node)
f = FindTopLevel()
for n in self.node.nodes:
n.accept_visitor(f)
self.compiler.namespaces = namespaces
module_ident = util.Set()
for n in module_code:
module_ident = module_ident.union(n.declared_identifiers())
module_identifiers = _Identifiers()
module_identifiers.declared = module_ident
# module-level names, python code
if not self.compiler.generate_unicode and self.compiler.source_encoding:
self.printer.writeline("# -*- encoding:%s -*-" % self.compiler.source_encoding)
self.printer.writeline("from mako import runtime, filters, cache")
self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
self.printer.writeline("__M_dict_builtin = dict")
self.printer.writeline("__M_locals_builtin = locals")
self.printer.writeline("_magic_number = %s" % repr(MAGIC_NUMBER))
self.printer.writeline("_modified_time = %s" % repr(time.time()))
self.printer.writeline("_template_filename=%s" % repr(self.compiler.filename))
self.printer.writeline("_template_uri=%s" % repr(self.compiler.uri))
self.printer.writeline("_template_cache=cache.Cache(__name__, _modified_time)")
self.printer.writeline("_source_encoding=%s" % repr(self.compiler.source_encoding))
if self.compiler.imports:
buf = ''
for imp in self.compiler.imports:
buf += imp + "\n"
self.printer.writeline(imp)
impcode = ast.PythonCode(buf, source='', lineno=0, pos=0, filename='template defined imports')
else:
impcode = None
main_identifiers = module_identifiers.branch(self.node)
module_identifiers.topleveldefs = module_identifiers.topleveldefs.union(main_identifiers.topleveldefs)
        module_identifiers.declared.add("UNDEFINED")
if impcode:
            for x in impcode.declared_identifiers:
                module_identifiers.declared.add(x)
self.compiler.identifiers = module_identifiers
self.printer.writeline("_exports = %s" % repr([n.name for n in main_identifiers.topleveldefs.values()]))
self.printer.write("\n\n")
if len(module_code):
self.write_module_code(module_code)
if len(inherit):
self.write_namespaces(namespaces)
self.write_inherit(inherit[-1])
elif len(namespaces):
self.write_namespaces(namespaces)
return main_identifiers.topleveldefs.values()
def write_render_callable(self, node, name, args, buffered, filtered, cached):
"""write a top-level render callable.
this could be the main render() method or that of a top-level def."""
self.printer.writelines(
"def %s(%s):" % (name, ','.join(args)),
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writeline("context._push_buffer()")
self.identifier_stack.append(self.compiler.identifiers.branch(self.node))
if not self.in_def and '**pageargs' in args:
self.identifier_stack[-1].argument_declared.add('pageargs')
if not self.in_def and (len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared)>0):
self.printer.writeline("__M_locals = __M_dict_builtin(%s)" % ','.join(["%s=%s" % (x, x) for x in self.identifiers.argument_declared]))
self.write_variable_declares(self.identifiers, toplevel=True)
for n in self.node.nodes:
n.accept_visitor(self)
self.write_def_finish(self.node, buffered, filtered, cached)
self.printer.writeline(None)
self.printer.write("\n\n")
if cached:
self.write_cache_decorator(node, name, args, buffered, self.identifiers, toplevel=True)
def write_module_code(self, module_code):
"""write module-level template code, i.e. that which is enclosed in <%! %> tags
in the template."""
for n in module_code:
self.write_source_comment(n)
self.printer.write_indented_block(n.text)
def write_inherit(self, node):
"""write the module-level inheritance-determination callable."""
self.printer.writelines(
"def _mako_inherit(template, context):",
"_mako_generate_namespaces(context)",
"return runtime._inherit_from(context, %s, _template_uri)" % (node.parsed_attributes['file']),
None
)
def write_namespaces(self, namespaces):
"""write the module-level namespace-generating callable."""
self.printer.writelines(
"def _mako_get_namespace(context, name):",
"try:",
"return context.namespaces[(__name__, name)]",
"except KeyError:",
"_mako_generate_namespaces(context)",
"return context.namespaces[(__name__, name)]",
None,None
)
self.printer.writeline("def _mako_generate_namespaces(context):")
for node in namespaces.values():
if node.attributes.has_key('import'):
self.compiler.has_ns_imports = True
self.write_source_comment(node)
if len(node.nodes):
self.printer.writeline("def make_namespace():")
export = []
identifiers = self.compiler.identifiers.branch(node)
class NSDefVisitor(object):
def visitDefTag(s, node):
self.write_inline_def(node, identifiers, nested=False)
export.append(node.name)
vis = NSDefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.printer.writeline("return [%s]" % (','.join(export)))
self.printer.writeline(None)
callable_name = "make_namespace()"
else:
callable_name = "None"
self.printer.writeline("ns = runtime.Namespace(%s, context._clean_inheritance_tokens(), templateuri=%s, callables=%s, calling_uri=_template_uri, module=%s)" % (repr(node.name), node.parsed_attributes.get('file', 'None'), callable_name, node.parsed_attributes.get('module', 'None')))
if eval(node.attributes.get('inheritable', "False")):
self.printer.writeline("context['self'].%s = ns" % (node.name))
self.printer.writeline("context.namespaces[(__name__, %s)] = ns" % repr(node.name))
self.printer.write("\n")
if not len(namespaces):
self.printer.writeline("pass")
self.printer.writeline(None)
def write_variable_declares(self, identifiers, toplevel=False, limit=None):
"""write variable declarations at the top of a function.
the variable declarations are in the form of callable definitions for defs and/or
name lookup within the function's context argument. the names declared are based on the
names that are referenced in the function body, which don't otherwise have any explicit
assignment operation. names that are assigned within the body are assumed to be
locally-scoped variables and are not separately declared.
for def callable definitions, if the def is a top-level callable then a
'stub' callable is generated which wraps the current Context into a closure. if the def
is not top-level, it is fully rendered as a local closure."""
# collection of all defs available to us in this scope
comp_idents = dict([(c.name, c) for c in identifiers.defs])
to_write = util.Set()
# write "context.get()" for all variables we are going to need that arent in the namespace yet
to_write = to_write.union(identifiers.undeclared)
# write closure functions for closures that we define right here
to_write = to_write.union(util.Set([c.name for c in identifiers.closuredefs.values()]))
# remove identifiers that are declared in the argument signature of the callable
to_write = to_write.difference(identifiers.argument_declared)
# remove identifiers that we are going to assign to. in this way we mimic Python's behavior,
# i.e. assignment to a variable within a block means that variable is now a "locally declared" var,
# which cannot be referenced beforehand.
to_write = to_write.difference(identifiers.locally_declared)
# if a limiting set was sent, constraint to those items in that list
# (this is used for the caching decorator)
if limit is not None:
to_write = to_write.intersection(limit)
if toplevel and getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("_import_ns = {}")
self.compiler.has_imports = True
for ident, ns in self.compiler.namespaces.iteritems():
if ns.attributes.has_key('import'):
self.printer.writeline("_mako_get_namespace(context, %s)._populate(_import_ns, %s)" % (repr(ident), repr(re.split(r'\s*,\s*', ns.attributes['import']))))
for ident in to_write:
if ident in comp_idents:
comp = comp_idents[ident]
if comp.is_root():
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
elif ident in self.compiler.namespaces:
self.printer.writeline("%s = _mako_get_namespace(context, %s)" % (ident, repr(ident)))
else:
if getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("%s = _import_ns.get(%s, context.get(%s, UNDEFINED))" % (ident, repr(ident), repr(ident)))
else:
self.printer.writeline("%s = context.get(%s, UNDEFINED)" % (ident, repr(ident)))
self.printer.writeline("__M_writer = context.writer()")
def write_source_comment(self, node):
"""write a source comment containing the line number of the corresponding template line."""
if self.last_source_line != node.lineno:
self.printer.writeline("# SOURCE LINE %d" % node.lineno)
self.last_source_line = node.lineno
def write_def_decl(self, node, identifiers):
"""write a locally-available callable referencing a top-level def"""
funcname = node.function_decl.funcname
namedecls = node.function_decl.get_argument_expressions()
nameargs = node.function_decl.get_argument_expressions(include_defaults=False)
if not self.in_def and (len(self.identifiers.locally_assigned) > 0 or len(self.identifiers.argument_declared) > 0):
nameargs.insert(0, 'context.locals_(__M_locals)')
else:
nameargs.insert(0, 'context')
self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
self.printer.writeline("return render_%s(%s)" % (funcname, ",".join(nameargs)))
self.printer.writeline(None)
def write_inline_def(self, node, identifiers, nested):
"""write a locally-available def callable inside an enclosing def."""
namedecls = node.function_decl.get_argument_expressions()
self.printer.writeline("def %s(%s):" % (node.name, ",".join(namedecls)))
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
self.printer.writelines(
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writelines(
"context._push_buffer()",
)
identifiers = identifiers.branch(node, nested=nested)
self.write_variable_declares(identifiers)
self.identifier_stack.append(identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, filtered, cached)
self.printer.writeline(None)
if cached:
self.write_cache_decorator(node, node.name, namedecls, False, identifiers, inline=True, toplevel=False)
def write_def_finish(self, node, buffered, filtered, cached, callstack=True):
"""write the end section of a rendering function, either outermost or inline.
this takes into account if the rendering function was filtered, buffered, etc.
and closes the corresponding try: block if any, and writes code to retrieve captured content,
apply filters, send proper return value."""
if not buffered and not cached and not filtered:
self.printer.writeline("return ''")
if callstack:
self.printer.writelines(
"finally:",
"context.caller_stack._pop_frame()",
None
)
if buffered or filtered or cached:
if buffered or cached:
# in a caching scenario, don't try to get a writer
# from the context after popping; assume the caching
                # implementation might be using a context with no
# extra buffers
self.printer.writelines(
"finally:",
"__M_buf = context._pop_buffer()"
)
else:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()"
)
if callstack:
self.printer.writeline("context.caller_stack._pop_frame()")
s = "__M_buf.getvalue()"
if filtered:
s = self.create_filter_callable(node.filter_args.args, s, False)
self.printer.writeline(None)
if buffered and not cached:
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
if buffered or cached:
self.printer.writeline("return %s" % s)
else:
self.printer.writelines(
"__M_writer(%s)" % s,
"return ''"
)
def write_cache_decorator(self, node_or_pagetag, name, args, buffered, identifiers, inline=False, toplevel=False):
"""write a post-function decorator to replace a rendering callable with a cached version of itself."""
self.printer.writeline("__M_%s = %s" % (name, name))
cachekey = node_or_pagetag.parsed_attributes.get('cache_key', repr(name))
cacheargs = {}
for arg in (('cache_type', 'type'), ('cache_dir', 'data_dir'), ('cache_timeout', 'expiretime'), ('cache_url', 'url')):
val = node_or_pagetag.parsed_attributes.get(arg[0], None)
if val is not None:
if arg[1] == 'expiretime':
cacheargs[arg[1]] = int(eval(val))
else:
cacheargs[arg[1]] = val
else:
if self.compiler.pagetag is not None:
val = self.compiler.pagetag.parsed_attributes.get(arg[0], None)
if val is not None:
if arg[1] == 'expiretime':
                            cacheargs[arg[1]] = int(eval(val))
else:
cacheargs[arg[1]] = val
self.printer.writeline("def %s(%s):" % (name, ','.join(args)))
# form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
pass_args = [ '=' in a and "%s=%s" % ((a.split('=')[0],)*2) or a for a in args]
self.write_variable_declares(identifiers, toplevel=toplevel, limit=node_or_pagetag.undeclared_identifiers())
if buffered:
s = "context.get('local').get_cached(%s, %screatefunc=lambda:__M_%s(%s))" % (cachekey, ''.join(["%s=%s, " % (k,v) for k, v in cacheargs.iteritems()]), name, ','.join(pass_args))
# apply buffer_filters
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
self.printer.writelines("return " + s,None)
else:
self.printer.writelines(
"__M_writer(context.get('local').get_cached(%s, %screatefunc=lambda:__M_%s(%s)))" % (cachekey, ''.join(["%s=%s, " % (k,v) for k, v in cacheargs.iteritems()]), name, ','.join(pass_args)),
"return ''",
None
)
def create_filter_callable(self, args, target, is_expression):
"""write a filter-applying expression based on the filters present in the given
filter names, adjusting for the global 'default' filter aliases as needed."""
def locate_encode(name):
if re.match(r'decode\..+', name):
return "filters." + name
else:
return filters.DEFAULT_ESCAPES.get(name, name)
if 'n' not in args:
if is_expression:
if self.compiler.pagetag:
args = self.compiler.pagetag.filter_args.args + args
if self.compiler.default_filters:
args = self.compiler.default_filters + args
for e in args:
# if filter given as a function, get just the identifier portion
if e == 'n':
continue
m = re.match(r'(.+?)(\(.*\))', e)
if m:
(ident, fargs) = m.group(1,2)
f = locate_encode(ident)
e = f + fargs
else:
x = e
e = locate_encode(e)
assert e is not None
target = "%s(%s)" % (e, target)
return target
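
    # For example, with no 'n' flag and no page/default filters in play, the
    # wrapping loop above nests the calls inside-out:
    #
    #     create_filter_callable(['h', 'trim'], 'data', False)
    #     -> "filters.trim(filters.html_escape(data))"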
def visitExpression(self, node):
self.write_source_comment(node)
if len(node.escapes) or (self.compiler.pagetag is not None and len(self.compiler.pagetag.filter_args.args)) or len(self.compiler.default_filters):
s = self.create_filter_callable(node.escapes_code.args, "%s" % node.text, True)
self.printer.writeline("__M_writer(%s)" % s)
else:
self.printer.writeline("__M_writer(%s)" % node.text)
def visitControlLine(self, node):
if node.isend:
self.printer.writeline(None)
else:
self.write_source_comment(node)
self.printer.writeline(node.text)
def visitText(self, node):
self.write_source_comment(node)
self.printer.writeline("__M_writer(%s)" % repr(node.content))
def visitTextTag(self, node):
filtered = len(node.filter_args.args) > 0
if filtered:
self.printer.writelines(
"__M_writer = context._push_writer()",
"try:",
)
for n in node.nodes:
n.accept_visitor(self)
if filtered:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()",
"__M_writer(%s)" % self.create_filter_callable(node.filter_args.args, "__M_buf.getvalue()", False),
None
)
def visitCode(self, node):
if not node.ismodule:
self.write_source_comment(node)
self.printer.write_indented_block(node.text)
if not self.in_def and len(self.identifiers.locally_assigned) > 0:
# if we are the "template" def, fudge locally declared/modified variables into the "__M_locals" dictionary,
# which is used for def calls within the same template, to simulate "enclosing scope"
self.printer.writeline('__M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin()[__M_key]) for __M_key in [%s] if __M_key in __M_locals_builtin()]))' % ','.join([repr(x) for x in node.declared_identifiers()]))
def visitIncludeTag(self, node):
self.write_source_comment(node)
args = node.attributes.get('args')
if args:
self.printer.writeline("runtime._include_file(context, %s, _template_uri, %s)" % (node.parsed_attributes['file'], args))
else:
self.printer.writeline("runtime._include_file(context, %s, _template_uri)" % (node.parsed_attributes['file']))
def visitNamespaceTag(self, node):
pass
def visitDefTag(self, node):
pass
def visitCallTag(self, node):
self.printer.writeline("def ccall(caller):")
export = ['body']
callable_identifiers = self.identifiers.branch(node, nested=True)
body_identifiers = callable_identifiers.branch(node, nested=False)
# we want the 'caller' passed to ccall to be used for the body() function,
# but for other non-body() <%def>s within <%call> we want the current caller off the call stack (if any)
body_identifiers.add_declared('caller')
self.identifier_stack.append(body_identifiers)
class DefVisitor(object):
def visitDefTag(s, node):
self.write_inline_def(node, callable_identifiers, nested=False)
export.append(node.name)
# remove defs that are within the <%call> from the "closuredefs" defined
# in the body, so they dont render twice
if node.name in body_identifiers.closuredefs:
del body_identifiers.closuredefs[node.name]
vis = DefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.identifier_stack.pop()
bodyargs = node.body_decl.get_argument_expressions()
self.printer.writeline("def body(%s):" % ','.join(bodyargs))
# TODO: figure out best way to specify buffering/nonbuffering (at call time would be better)
buffered = False
if buffered:
self.printer.writelines(
"context._push_buffer()",
"try:"
)
self.write_variable_declares(body_identifiers)
self.identifier_stack.append(body_identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, False, False, callstack=False)
self.printer.writelines(
None,
"return [%s]" % (','.join(export)),
None
)
self.printer.writelines(
# get local reference to current caller, if any
"caller = context.caller_stack._get_caller()",
# push on caller for nested call
"context.caller_stack.nextcaller = runtime.Namespace('caller', context, callables=ccall(caller))",
"try:")
self.write_source_comment(node)
self.printer.writelines(
"__M_writer(%s)" % self.create_filter_callable([], node.attributes['expr'], True),
"finally:",
"context.caller_stack.nextcaller = None",
None
)
class _Identifiers(object):
"""tracks the status of identifier names as template code is rendered."""
def __init__(self, node=None, parent=None, nested=False):
if parent is not None:
# things that have already been declared in an enclosing namespace (i.e. names we can just use)
self.declared = util.Set(parent.declared).union([c.name for c in parent.closuredefs.values()]).union(parent.locally_declared).union(parent.argument_declared)
# if these identifiers correspond to a "nested" scope, it means whatever the
# parent identifiers had as undeclared will have been declared by that parent,
# and therefore we have them in our scope.
if nested:
self.declared = self.declared.union(parent.undeclared)
# top level defs that are available
self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
else:
self.declared = util.Set()
self.topleveldefs = util.SetLikeDict()
# things within this level that are referenced before they are declared (e.g. assigned to)
self.undeclared = util.Set()
# things that are declared locally. some of these things could be in the "undeclared"
# list as well if they are referenced before declared
self.locally_declared = util.Set()
        # assignments made in explicit python blocks. these will be propagated to
# the context of local def calls.
self.locally_assigned = util.Set()
# things that are declared in the argument signature of the def callable
self.argument_declared = util.Set()
# closure defs that are defined in this level
self.closuredefs = util.SetLikeDict()
self.node = node
if node is not None:
node.accept_visitor(self)
def branch(self, node, **kwargs):
"""create a new Identifiers for a new Node, with this Identifiers as the parent."""
return _Identifiers(node, self, **kwargs)
defs = property(lambda self:util.Set(self.topleveldefs.union(self.closuredefs).values()))
def __repr__(self):
return "Identifiers(declared=%s, locally_declared=%s, undeclared=%s, topleveldefs=%s, closuredefs=%s, argumenetdeclared=%s)" % (repr(list(self.declared)), repr(list(self.locally_declared)), repr(list(self.undeclared)), repr([c.name for c in self.topleveldefs.values()]), repr([c.name for c in self.closuredefs.values()]), repr(self.argument_declared))
def check_declared(self, node):
"""update the state of this Identifiers with the undeclared and declared identifiers of the given node."""
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.locally_declared.add(ident)
def add_declared(self, ident):
self.declared.add(ident)
if ident in self.undeclared:
self.undeclared.remove(ident)
def visitExpression(self, node):
self.check_declared(node)
def visitControlLine(self, node):
self.check_declared(node)
def visitCode(self, node):
if not node.ismodule:
self.check_declared(node)
self.locally_assigned = self.locally_assigned.union(node.declared_identifiers())
def visitDefTag(self, node):
if node.is_root():
self.topleveldefs[node.name] = node
elif node is not self.node:
self.closuredefs[node.name] = node
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
# visit defs only one level deep
if node is self.node:
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
def visitIncludeTag(self, node):
self.check_declared(node)
def visitPageTag(self, node):
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
self.check_declared(node)
def visitCallTag(self, node):
if node is self.node:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
else:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
| agpl-3.0 |
jcoady9/python-for-android | python-modules/twisted/twisted/conch/ssh/forwarding.py | 61 | 5659 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""
This module contains the implementation of TCP forwarding, which allows
clients and servers to forward arbitrary TCP data across the connection.
Maintainer: Paul Swartz
"""
import struct
from twisted.internet import protocol, reactor
from twisted.python import log
import common, channel
class SSHListenForwardingFactory(protocol.Factory):
def __init__(self, connection, hostport, klass):
self.conn = connection
self.hostport = hostport # tuple
self.klass = klass
def buildProtocol(self, addr):
channel = self.klass(conn = self.conn)
client = SSHForwardingClient(channel)
channel.client = client
addrTuple = (addr.host, addr.port)
channelOpenData = packOpen_direct_tcpip(self.hostport, addrTuple)
self.conn.openChannel(channel, channelOpenData)
return client
class SSHListenForwardingChannel(channel.SSHChannel):
def channelOpen(self, specificData):
log.msg('opened forwarding channel %s' % self.id)
if len(self.client.buf)>1:
b = self.client.buf[1:]
self.write(b)
self.client.buf = ''
def openFailed(self, reason):
self.closed()
def dataReceived(self, data):
self.client.transport.write(data)
def eofReceived(self):
self.client.transport.loseConnection()
def closed(self):
if hasattr(self, 'client'):
log.msg('closing local forwarding channel %s' % self.id)
self.client.transport.loseConnection()
del self.client
class SSHListenClientForwardingChannel(SSHListenForwardingChannel):
name = 'direct-tcpip'
class SSHListenServerForwardingChannel(SSHListenForwardingChannel):
name = 'forwarded-tcpip'
class SSHConnectForwardingChannel(channel.SSHChannel):
def __init__(self, hostport, *args, **kw):
channel.SSHChannel.__init__(self, *args, **kw)
self.hostport = hostport
self.client = None
self.clientBuf = ''
def channelOpen(self, specificData):
cc = protocol.ClientCreator(reactor, SSHForwardingClient, self)
log.msg("connecting to %s:%i" % self.hostport)
cc.connectTCP(*self.hostport).addCallbacks(self._setClient, self._close)
def _setClient(self, client):
self.client = client
log.msg("connected to %s:%i" % self.hostport)
if self.clientBuf:
self.client.transport.write(self.clientBuf)
self.clientBuf = None
if self.client.buf[1:]:
self.write(self.client.buf[1:])
self.client.buf = ''
def _close(self, reason):
log.msg("failed to connect: %s" % reason)
self.loseConnection()
def dataReceived(self, data):
if self.client:
self.client.transport.write(data)
else:
self.clientBuf += data
def closed(self):
if self.client:
log.msg('closed remote forwarding channel %s' % self.id)
if self.client.channel:
self.loseConnection()
self.client.transport.loseConnection()
del self.client
def openConnectForwardingClient(remoteWindow, remoteMaxPacket, data, avatar):
remoteHP, origHP = unpackOpen_direct_tcpip(data)
return SSHConnectForwardingChannel(remoteHP,
remoteWindow=remoteWindow,
remoteMaxPacket=remoteMaxPacket,
avatar=avatar)
class SSHForwardingClient(protocol.Protocol):
def __init__(self, channel):
self.channel = channel
self.buf = '\000'
def dataReceived(self, data):
if self.buf:
self.buf += data
else:
self.channel.write(data)
def connectionLost(self, reason):
if self.channel:
self.channel.loseConnection()
self.channel = None
def packOpen_direct_tcpip((connHost, connPort), (origHost, origPort)):
"""Pack the data suitable for sending in a CHANNEL_OPEN packet.
"""
conn = common.NS(connHost) + struct.pack('>L', connPort)
orig = common.NS(origHost) + struct.pack('>L', origPort)
return conn + orig
packOpen_forwarded_tcpip = packOpen_direct_tcpip
def unpackOpen_direct_tcpip(data):
"""Unpack the data to a usable format.
"""
connHost, rest = common.getNS(data)
connPort = int(struct.unpack('>L', rest[:4])[0])
origHost, rest = common.getNS(rest[4:])
origPort = int(struct.unpack('>L', rest[:4])[0])
return (connHost, connPort), (origHost, origPort)
unpackOpen_forwarded_tcpip = unpackOpen_direct_tcpip
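# Hedged illustration (not part of the original module): the pack/unpack
# helpers above are inverses of each other; the addresses below are arbitrary
# example values.
def _demoPackUnpack():
    data = packOpen_direct_tcpip(('10.0.0.1', 8080), ('192.168.0.5', 52100))
    connHP, origHP = unpackOpen_direct_tcpip(data)
    assert connHP == ('10.0.0.1', 8080)
    assert origHP == ('192.168.0.5', 52100)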
def packGlobal_tcpip_forward((host, port)):
return common.NS(host) + struct.pack('>L', port)
def unpackGlobal_tcpip_forward(data):
host, rest = common.getNS(data)
port = int(struct.unpack('>L', rest[:4])[0])
return host, port
"""This is how the data -> eof -> close stuff /should/ work.
debug3: channel 1: waiting for connection
debug1: channel 1: connected
debug1: channel 1: read<=0 rfd 7 len 0
debug1: channel 1: read failed
debug1: channel 1: close_read
debug1: channel 1: input open -> drain
debug1: channel 1: ibuf empty
debug1: channel 1: send eof
debug1: channel 1: input drain -> closed
debug1: channel 1: rcvd eof
debug1: channel 1: output open -> drain
debug1: channel 1: obuf empty
debug1: channel 1: close_write
debug1: channel 1: output drain -> closed
debug1: channel 1: rcvd close
debug3: channel 1: will not send data after close
debug1: channel 1: send close
debug1: channel 1: is dead
"""
| apache-2.0 |
fclesio/learning-space | Python/ims24/ims24/contrators/requester.py | 1 | 3740 | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
#
# requester.py
# @Author : Gustavo F ([email protected])
# @Link : https://github.com/sharkguto
# @Date : 17/02/2019 11:06:12
import typing
import requests
from requests_futures.sessions import FuturesSession
from ims24 import logger
from ims24.configuration.environments import BASE_URL_CRAWLER
class RequesterCrawler(object):
"""
Interface to implement in service
"""
def __init__(self, base_url: str = ""):
"""
constructor.... load session and set env variables
:param self: itself
"""
self._requester_url = base_url or BASE_URL_CRAWLER
self._default_headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:65.0) Gecko/20100101 Firefox/65.0",
"content-type": "text/html",
}
self.reset_session()
def reset_session(self):
"""
Reset session if necessary.... bootstrap in constructor to load request session
:param self: itself
"""
self._requester_session = requests.Session()
self._requester_session.headers.update(self._default_headers)
def get_list(
self, endpoints: tuple, headers: dict = {}, params: dict = {}, **path_params
    ) -> typing.Dict[str, object]:
"""
send multiple requests and group by endpoints
:param self: itself
:param endpoints:tuple: tuple list with endpoints
        :param headers:dict={}: optional extra request headers (currently unused)
        :param params:dict={}: query string parameters passed to each request
        :param **path_params: extra path parameters (currently unused)
"""
dict_responses = {}
result = {}
wrk_num = self.max_workers_value(endpoints)
with FuturesSession(max_workers=wrk_num) as session_async:
session_async.headers.update(self._default_headers)
for i in endpoints:
dict_responses[i] = {"url": self._endpoints[i]}
dict_responses[i]["response"] = session_async.get(
dict_responses[i]["url"], params=params, timeout=20
)
try:
for i in endpoints:
response = dict_responses[i][
"response"
                ].result() # blocking I/O... wait for the response
if "application/json" in response.headers["content-type"].lower():
result[i] = response.json()
else: # needs to implement parser on your service
result[i] = self.html_parser(response.text)
logger.info(
f"finish request {dict_responses[i]['url']} {response.status_code}"
)
except requests.exceptions.ConnectionError as ex:
logger.error(f"{ex} \n key : {i} on request {dict_responses[i]}")
result = f"call procedure {i} failed -> Connection to requester failed"
raise ex
except Exception as ex:
logger.error(f"{ex} \n key : {i} on request {dict_responses[i]}")
result = f"call procedure {i} failed -> {response.text}"
raise ex
return result
def max_workers_value(self, endpoints: tuple):
"""
        check how many endpoints we have... if there are more than 50,
        cap the pool at 50 concurrent workers per request loop
:param self: itself
:param endpoints:tuple: ("1","2","3")
"""
max_conn = len(endpoints) or 1
if max_conn > 50:
max_conn = 50
return max_conn
def html_parser(self, text: str):
raise NotImplementedError("Needs to be implement in your service")
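# Hedged usage sketch (not part of the original module): a minimal service
# subclass. The endpoint name/URL and the parser body are illustrative only;
# get_list() expects self._endpoints to map endpoint keys to full URLs.
class ListingsCrawler(RequesterCrawler):
    def __init__(self, base_url: str = ""):
        super().__init__(base_url)
        self._endpoints = {
            "listings": f"{self._requester_url}/listings"  # hypothetical path
        }
    def html_parser(self, text: str):
        # a real service would parse the markup here (e.g. with BeautifulSoup)
        return {"raw_length": len(text)}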
| gpl-2.0 |
Glignos/inspire-next | inspirehep/modules/fixtures/files.py | 4 | 2972 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Functions for searching ES and returning the results."""
from __future__ import absolute_import, division, print_function
import os
from flask import current_app
from invenio_db import db
from invenio_files_rest.models import Location
def init_default_storage_path():
"""Init default file store location."""
try:
uri = current_app.config['BASE_FILES_LOCATION']
if uri.startswith('/') and not os.path.exists(uri):
os.makedirs(uri)
loc = Location(
name="default",
uri=uri,
default=True
)
db.session.add(loc)
db.session.commit()
return loc
except Exception:
db.session.rollback()
raise
def init_workflows_storage_path(default=False):
"""Init workflows file store location."""
try:
uri = current_app.config['WORKFLOWS_FILE_LOCATION']
if uri.startswith('/') and not os.path.exists(uri):
os.makedirs(uri)
loc = Location(
name=current_app.config["WORKFLOWS_DEFAULT_FILE_LOCATION_NAME"],
uri=uri,
            default=default
)
db.session.add(loc)
db.session.commit()
return loc
except Exception:
db.session.rollback()
raise
def init_records_files_storage_path(default=False):
"""Init records file store location."""
try:
uri = os.path.join(
current_app.config['BASE_FILES_LOCATION'],
"records", "files"
)
if uri.startswith('/') and not os.path.exists(uri):
os.makedirs(uri)
loc = Location(
name=current_app.config["RECORDS_DEFAULT_FILE_LOCATION_NAME"],
uri=uri,
            default=default
)
db.session.add(loc)
db.session.commit()
return loc
except Exception:
db.session.rollback()
raise
def init_all_storage_paths():
"""Init all storage paths."""
init_default_storage_path()
init_workflows_storage_path()
init_records_files_storage_path()
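# Hedged usage sketch (not part of the original module): these helpers assume
# an initialized Flask/Invenio application context with a configured database;
# the factory import below is hypothetical.
# from invenio_app.factory import create_app
# app = create_app()
# with app.app_context():
#     init_all_storage_paths()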
| gpl-3.0 |
pbmanis/acq4 | acq4/analysis/scripts/eventExplorer.py | 3 | 15464 | from __future__ import print_function
from acq4.util import Qt
import acq4.Manager
import acq4.pyqtgraph as pg
import acq4.pyqtgraph.opengl as gl
import numpy as np
import acq4.util.functions as fn
import re
man = acq4.Manager.getManager()
## update DB field to reflect dir meta info
#for i in db.select('Cell', ['rowid']):
#d = db.getDir('Cell', i[0])
#typ = d.info().get('type', '')
#db.update('Cell', {'type': typ}, rowid=i[0])
#print d, typ
global eventView, siteView, cells
eventView = 'events_view'
siteView = 'sites_view'
firstRun = False
if 'events' not in locals():
global events
events = {}
firstRun = True
win = Qt.QMainWindow()
#cw = Qt.QWidget()
layout = pg.LayoutWidget()
#layout = Qt.QGridLayout()
#layout.setContentsMargins(0,0,0,0)
#layout.setSpacing(0)
#cw.setLayout(layout)
win.setCentralWidget(layout)
cellCombo = Qt.QComboBox()
cellCombo.setSizeAdjustPolicy(cellCombo.AdjustToContents)
layout.addWidget(cellCombo)
reloadBtn = Qt.QPushButton('reload')
layout.addWidget(reloadBtn)
separateCheck = Qt.QCheckBox("color pre/post")
layout.addWidget(separateCheck)
colorCheck = Qt.QCheckBox("color y position")
layout.addWidget(colorCheck)
errLimitSpin = pg.SpinBox(value=0.7, step=0.1)
layout.addWidget(errLimitSpin)
lengthRatioLimitSpin = pg.SpinBox(value=1.5, step=0.1)
layout.addWidget(lengthRatioLimitSpin)
postRgnStartSpin = pg.SpinBox(value=0.500, step=0.01, siPrefix=True, suffix='s')
layout.addWidget(postRgnStartSpin)
postRgnStopSpin = pg.SpinBox(value=0.700, step=0.01, siPrefix=True, suffix='s')
layout.addWidget(postRgnStopSpin)
spl1 = Qt.QSplitter()
spl1.setOrientation(Qt.Qt.Vertical)
layout.addWidget(spl1, row=1, col=0, rowspan=1, colspan=8)
pw1 = pg.PlotWidget()
spl1.addWidget(pw1)
pw1.setLabel('left', 'Amplitude', 'A')
pw1.setLabel('bottom', 'Decay Tau', 's')
spl2 = Qt.QSplitter()
spl2.setOrientation(Qt.Qt.Horizontal)
spl1.addWidget(spl2)
pw2 = pg.PlotWidget(labels={'bottom': ('time', 's')})
spl2.addWidget(pw2)
tab = Qt.QTabWidget()
spl2.addWidget(tab)
## For viewing cell morphology
gv = pg.GraphicsView()
gv.setBackgroundBrush(pg.mkBrush('w'))
image = pg.ImageItem()
gv.addItem(image)
gv.enableMouse()
gv.setAspectLocked(True)
tab.addTab(gv, 'Morphology')
## 3D atlas
import acq4.analysis.atlas.CochlearNucleus as CN
atlas = CN.CNAtlasDisplayWidget()
atlas.showLabel('DCN')
atlas.showLabel('AVCN')
atlas.showLabel('PVCN')
tab.addTab(atlas, 'Atlas')
atlasPoints = gl.GLScatterPlotItem()
atlas.addItem(atlasPoints)
win.show()
win.resize(1000,800)
sp1 = pw1.scatterPlot([], pen=pg.mkPen(None), brush=(200,200,255,70), identical=True, size=8)
sp2 = pw1.scatterPlot([], pen=pg.mkPen(None), brush=(255,200,200,70), identical=True, size=8)
sp3 = pw1.scatterPlot([], pen=pg.mkPen(None), brush=(100,255,100,70), identical=True, size=8)
sp4 = pw1.scatterPlot([], pen=pg.mkPen(None), size=8)
print("Reading cell list...")
#import os, pickle
#md = os.path.abspath(os.path.split(__file__)[0])
#cacheFile = os.path.join(md, 'eventCache.p')
#if os.path.isfile(cacheFile):
#print "Read from cache..."
#ev = pickle.load(open(cacheFile, 'r'))
#else:
#pickle.dump(ev, open(cacheFile, 'w'))
## create views that link cell information to events/sites
db = man.getModule('Data Manager').currentDatabase()
if not db.hasTable(siteView):
print("Creating DB views.")
db.createView(siteView, ['photostim_sites', 'DirTable_Protocol', 'DirTable_Cell']) ## seems to be unused.
if not db.hasTable(eventView):
db.createView(eventView, ['photostim_events', 'DirTable_Protocol', 'DirTable_Cell'])
cells = db.select(siteView, ['CellDir'], distinct=True)
cells = [c['CellDir'] for c in cells]
cells.sort(lambda a,b: cmp(a.name(), b.name()))
cellCombo.addItem('')
for c in cells:
cellCombo.addItem(c.name(relativeTo=man.baseDir))
#cellSpin.setMaximum(len(cells)-1)
print("Done.")
def loadCell(cell, reloadData=False):
global events
if reloadData:
events.pop(cell, None)
if cell in events:
return
db = man.getModule('Data Manager').currentDatabase()
mod = man.dataModel
allEvents = []
hvals = {}
nEv = 0
positionCache = {}
tcache = {}
print("Loading all events for cell", cell)
tot = db.select(eventView, 'count()', where={'CellDir': cell})[0]['count()']
print(tot, "total events..")
with pg.ProgressDialog('Loading event data...', maximum=tot, wait=0) as dlg:
for ev in db.iterSelect(eventView, ['ProtocolSequenceDir', 'SourceFile', 'fitAmplitude', 'fitTime', 'fitDecayTau', 'fitRiseTau', 'fitTimeToPeak', 'fitLengthOverDecay', 'fitFractionalError', 'userTransform', 'CellType', 'CellDir', 'ProtocolDir'], where={'CellDir': cell}, toArray=True, chunkSize=200):
extra = np.empty(ev.shape, dtype=[('right', float), ('anterior', float), ('dorsal', float), ('holding', float)])
## insert holding levels
for i in range(len(ev)):
sd = ev[i]['ProtocolSequenceDir']
if sd not in hvals:
cf = ev[i]['SourceFile']
hvals[sd] = mod.getClampHoldingLevel(cf)
#print hvals[sd], cf
extra[i]['holding'] = hvals[sd]
## insert positions
for i in range(len(ev)):
protoDir = ev[i]['SourceFile'].parent()
key = protoDir
#key = (ev[i]['ProtocolSequenceDir'], ev[i]['SourceFile'])
if key not in positionCache:
#try:
#dh = ev[i]['ProtocolDir']
#p1 = pg.Point(dh.info()['Scanner']['position'])
#if key[0] not in tcache:
#tr = pg.SRTTransform()
#tr.restoreState(dh.parent().info()['userTransform'])
#tcache[key[0]] = tr
#trans = tcache[key[0]]
#p2 = trans.map(p1)
#pcache[key] = (p2.x(),p2.y())
#except:
#print key
#raise
rec = db.select('CochlearNucleus_Protocol', where={'ProtocolDir': protoDir})
if len(rec) == 0:
pos = (None, None, None)
elif len(rec) == 1:
pos = (rec[0]['right'], rec[0]['anterior'], rec[0]['dorsal'])
                    else:
raise Exception("Multiple position records for %s!" % str(protoDir))
positionCache[key] = pos
extra[i]['right'] = positionCache[key][0]
extra[i]['anterior'] = positionCache[key][1]
extra[i]['dorsal'] = positionCache[key][2]
ev = fn.concatenateColumns([ev, extra])
allEvents.append(ev)
nEv += len(ev)
dlg.setValue(nEv)
if dlg.wasCanceled():
raise Exception('Canceled by user.')
ev = np.concatenate(allEvents)
numExSites = 0
numInSites = 0
for site in db.select(siteView, 'ProtocolSequenceDir', where={'CellDir': cell}):
h = hvals.get(site['ProtocolSequenceDir'],None)
if h is None:
continue
if h > -0.02:
numInSites += 1
elif h < -0.04:
numExSites += 1
events[cell] = (ev, numExSites, numInSites)
def init():
if not firstRun:
return
cellCombo.currentIndexChanged.connect(showCell)
separateCheck.toggled.connect(showCell)
colorCheck.toggled.connect(showCell)
errLimitSpin.valueChanged.connect(showCell)
lengthRatioLimitSpin.valueChanged.connect(showCell)
reloadBtn.clicked.connect(reloadCell)
for s in [sp1, sp2, sp3, sp4]:
s.sigPointsClicked.connect(plotClicked)
def plotClicked(plt, pts):
pt = pts[0]
#(id, fn, time) = pt.data
#[['SourceFile', 'ProtocolSequenceDir', 'fitTime']]
#fh = db.getDir('ProtocolSequence', id)[fn]
fh = pt.data()['SourceFile']
id = pt.data()['ProtocolSequenceDir']
time = pt.data()['fitTime']
data = fh.read()['Channel':'primary']
data = fn.besselFilter(data, 8e3)
p = pw2.plot(data, clear=True)
pos = time / data.xvals('Time')[-1]
arrow = pg.CurveArrow(p, pos=pos)
xr = pw2.viewRect().left(), pw2.viewRect().right()
if time < xr[0] or time > xr[1]:
w = xr[1]-xr[0]
pw2.setXRange(time-w/5., time+4*w/5., padding=0)
fitLen = pt.data()['fitDecayTau']*pt.data()['fitLengthOverDecay']
x = np.linspace(time, time+fitLen, fitLen * 50e3)
v = [pt.data()['fitAmplitude'], pt.data()['fitTime'], pt.data()['fitRiseTau'], pt.data()['fitDecayTau']]
y = fn.pspFunc(v, x, risePower=2.0) + data[np.argwhere(data.xvals('Time')>time)[0]-1]
pw2.plot(x, y, pen='b')
#plot.addItem(arrow)
def select(ev, ex=True):
#if source is not None:
#ev = ev[ev['CellDir']==source]
if ex:
ev = ev[ev['holding'] < -0.04] # excitatory events
ev = ev[(ev['fitAmplitude'] < 0) * (ev['fitAmplitude'] > -2e-10)]
else:
ev = ev[(ev['holding'] >= -0.02) * (ev['holding'] <= 0.01)] ## inhibitory events
ev = ev[(ev['fitAmplitude'] > 0) * (ev['fitAmplitude'] < 2e-10)]
ev = ev[(0 < ev['fitDecayTau']) * (ev['fitDecayTau'] < 0.2)] # select decay region
ev = ev[ev['fitFractionalError'] < errLimitSpin.value()]
ev = ev[ev['fitLengthOverDecay'] > lengthRatioLimitSpin.value()]
return ev
def reloadCell():
showCell(reloadData=True)
def showCell(**kwds):
pw2.clear()
reloadData = kwds.get('reloadData', False)
#global lock
#if lock:
#return
#lock = True
Qt.QApplication.processEvents() ## prevents double-spin
#lock = False
cell = cells[cellCombo.currentIndex()-1]
dh = cell #db.getDir('Cell', cell)
loadCell(dh, reloadData=reloadData)
try:
image.setImage(dh['morphology.png'].read())
gv.setRange(image.sceneBoundingRect())
    except Exception:
        image.setImage(np.zeros((2,2)))
ev, numExSites, numInSites = events[cell]
ev2 = select(ev, ex=True)
ev3 = select(ev, ex=False)
if colorCheck.isChecked():
sp1.hide()
sp2.hide()
sp3.hide()
sp4.show()
start = postRgnStart()
stop = postRgnStop()
ev2post = ev2[(ev2['fitTime']>start) * (ev2['fitTime']<stop)]
ev3post = ev3[(ev3['fitTime']>start) * (ev3['fitTime']<stop)]
ev4 = np.concatenate([ev2post, ev3post])
yMax = ev4['dorsal'].max()
yMin = ev4['dorsal'].min()
brushes = []
for i in range(len(ev4)):
hue = 0.6*((ev4[i]['dorsal']-yMin) / (yMax-yMin))
brushes.append(pg.hsvColor(hue, 1.0, 1.0, 0.3))
#pts.append({
#'pos': (ev4[i]['fitDecayTau'], ev4[i]['fitAmplitude']),
#'brush': pg.hsvColor(hue, 1, 1, 0.3),
#'data': ev4[i]
#})
sp4.setData(x=ev4['fitDecayTau'], y=ev4['fitAmplitude'], symbolBrush=brushes, data=ev4)
else:
sp1.show()
sp2.show()
#sp3.show()
sp4.hide()
## excitatory
if separateCheck.isChecked():
pre = ev2[ev2['fitTime']< preRgnStop()]
post = ev2[(ev2['fitTime'] > postRgnStart()) * (ev2['fitTime'] < postRgnStop())]
else:
pre = ev2
sp1.setData(x=pre['fitDecayTau'], y=pre['fitAmplitude'], data=pre);
#print "Cell ", cell
#print " excitatory:", np.median(ev2['fitDecayTau']), np.median(ev2['fitAmplitude'])
## inhibitory
if separateCheck.isChecked():
pre = ev3[ev3['fitTime']< preRgnStop()]
post2 = ev3[(ev3['fitTime'] > postRgnStart()) * (ev3['fitTime'] < postRgnStop())]
post = np.concatenate([post, post2])
else:
pre = ev3
sp2.setData(x=pre['fitDecayTau'], y=pre['fitAmplitude'], data=pre);
#print " inhibitory:", np.median(ev2['fitDecayTau']), np.median(ev2['fitAmplitude'])
if separateCheck.isChecked():
sp3.setData(x=post['fitDecayTau'], y=post['fitAmplitude'], data=post)
sp3.show()
else:
sp3.hide()
try:
typ = ev2[0]['CellType']
    except IndexError:
typ = ev3[0]['CellType']
sr = spontRate(ev2, numExSites)
sri = spontRate(ev3, numInSites)
title = "%s -- %s --- <span style='color: #99F;'>ex:</span> %s %s %s %0.1fHz --- <span style='color: #F99;'>in:</span> %s %s %s %0.1fHz" % (
dh.name(relativeTo=dh.parent().parent().parent()),
typ,
pg.siFormat(np.median(ev2['fitTimeToPeak']), error=np.std(ev2['fitTimeToPeak']), space=False, suffix='s'),
pg.siFormat(np.median(ev2['fitDecayTau']), error=np.std(ev2['fitDecayTau']), space=False, suffix='s'),
pg.siFormat(np.median(ev2['fitAmplitude']), error=np.std(ev2['fitAmplitude']), space=False, suffix='A'),
sr,
pg.siFormat(np.median(ev3['fitTimeToPeak']), error=np.std(ev3['fitTimeToPeak']), space=False, suffix='s'),
pg.siFormat(np.median(ev3['fitDecayTau']), error=np.std(ev3['fitDecayTau']), space=False, suffix='s'),
pg.siFormat(np.median(ev3['fitAmplitude']), error=np.std(ev3['fitAmplitude']), space=False, suffix='A'),
sri)
print(re.sub(r'<[^>]+>', '', title))
pw1.setTitle(title)
### show cell in atlas
#rec = db.select('CochlearNucleus_Cell', where={'CellDir': cell})
#pts = []
#if len(rec) > 0:
#pos = (rec[0]['right'], rec[0]['anterior'], rec[0]['dorsal'])
#pts = [{'pos': pos, 'size': 100e-6, 'color': (0.7, 0.7, 1.0, 1.0)}]
### show event positions
evSpots = {}
for rec in ev:
p = (rec['right'], rec['anterior'], rec['dorsal'])
evSpots[p] = None
pos = np.array(list(evSpots.keys()))
atlasPoints.setData(pos=pos, )
def spontRate(ev, n):
## This is broken. It does not take into account recordings that had no events.
ev = ev[ev['fitTime'] < preRgnStop()]
#count = {}
#dirs = set()
#for i in range(len(ev)):
#key = (ev[i]['ProtocolSequenceDir'], ev[i]['SourceFile'])
#dirs.add(set)
#if key not in count:
#count[key] = 0
#count[key] += 1
#sr = np.mean([v/(preRgnStop()) for v in count.itervalues()])
if n == 0:
return 0
return len(ev) / (preRgnStop() * n)
def preRgnStop():
return postRgnStartSpin.value() - 0.002
def postRgnStart():
return postRgnStartSpin.value() + 0.002
def postRgnStop():
return postRgnStopSpin.value()
init() | mit |
iddqd1/django-cms | cms/tests/test_check.py | 1 | 7102 | # -*- coding: utf-8 -*-
from copy import deepcopy
import os
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template import base
from django.test import SimpleTestCase, TestCase
from cms.api import add_plugin
from cms.models.pluginmodel import CMSPlugin
from cms.models.placeholdermodel import Placeholder
from cms.test_utils.project.pluginapp.plugins.manytomany_rel.models import ArticlePluginModel
from cms.test_utils.project.extensionapp.models import MyPageExtension
from cms.utils.check import FileOutputWrapper, check, FileSectionWrapper
from djangocms_text_ckeditor.cms_plugins import TextPlugin
class TestOutput(FileOutputWrapper):
def __init__(self):
super(TestOutput, self).__init__(None, None)
self.section_wrapper = TestSectionOutput
def write(self, message):
pass
def write_stderr(self, message):
pass
class TestSectionOutput(FileSectionWrapper):
def write(self, message):
pass
def write_stderr(self, message):
pass
class CheckAssertMixin(object):
def assertCheck(self, successful, **assertions):
"""
        Asserts that the checks are successful or not.
        ``assertions`` maps output counters to expected values (e.g. successes=5)
"""
output = TestOutput()
check(output)
self.assertEqual(output.successful, successful)
for key, value in assertions.items():
self.assertEqual(getattr(output, key), value, "%s %s expected, got %s" % (value, key, getattr(output, key)))
class CheckTests(CheckAssertMixin, SimpleTestCase):
def test_test_confs(self):
self.assertCheck(True, errors=0, warnings=0)
def test_cms_moderator_deprecated(self):
with self.settings(CMS_MODERATOR=True):
self.assertCheck(True, warnings=1, errors=0)
def test_cms_flat_urls_deprecated(self):
with self.settings(CMS_FLAT_URLS=True):
self.assertCheck(True, warnings=1, errors=0)
def test_no_sekizai(self):
base.get_templatetags_modules.cache_clear()
apps.set_available_apps(['cms', 'menus'])
self.assertCheck(False, errors=2)
apps.unset_available_apps()
def test_no_sekizai_template_context_processor(self):
override = {'TEMPLATES': deepcopy(settings.TEMPLATES)}
override['TEMPLATES'][0]['OPTIONS']['context_processors'] = []
with self.settings(**override):
self.assertCheck(False, errors=2)
def test_old_style_i18n_settings(self):
with self.settings(CMS_LANGUAGES=[('en', 'English')]):
self.assertRaises(ImproperlyConfigured, self.assertCheck, True, warnings=1, errors=0)
def test_cms_hide_untranslated_deprecated(self):
with self.settings(CMS_HIDE_UNTRANSLATED=True):
self.assertCheck(True, warnings=1, errors=0)
def test_cms_language_fallback_deprecated(self):
with self.settings(CMS_LANGUAGE_FALLBACK=True):
self.assertCheck(True, warnings=1, errors=0)
def test_cms_language_conf_deprecated(self):
with self.settings(CMS_LANGUAGE_CONF=True):
self.assertCheck(True, warnings=1, errors=0)
def test_middlewares(self):
MIDDLEWARE_CLASSES=[
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
self.assertCheck(True, warnings=0, errors=0)
with self.settings(MIDDLEWARE_CLASSES=MIDDLEWARE_CLASSES):
self.assertCheck(False, warnings=0, errors=2)
def test_cms_site_languages_deprecated(self):
with self.settings(CMS_SITE_LANGUAGES=True):
self.assertCheck(True, warnings=1, errors=0)
def test_cms_frontend_languages_deprecated(self):
with self.settings(CMS_FRONTEND_LANGUAGES=True):
self.assertCheck(True, warnings=1, errors=0)
def test_copy_relations_fk_check(self):
"""
this is ugly, feel free to come up with a better test
"""
self.assertCheck(True, warnings=0, errors=0)
copy_rel = ArticlePluginModel.copy_relations
del ArticlePluginModel.copy_relations
self.assertCheck(True, warnings=2, errors=0)
ArticlePluginModel.copy_relations = copy_rel
def test_copy_relations_on_page_extension(self):
"""
Agreed. It is ugly, but it works.
"""
self.assertCheck(True, warnings=0, errors=0)
copy_rel = MyPageExtension.copy_relations
del MyPageExtension.copy_relations
self.assertCheck(True, warnings=1, errors=0)
MyPageExtension.copy_relations = copy_rel
def test_placeholder_tag_deprecation(self):
self.assertCheck(True, warnings=0, errors=0)
alt_dir = os.path.join(
os.path.dirname(__file__),
'..',
'test_utils',
'project',
'alt_templates'
)
NEWTEMPLATES = deepcopy(settings.TEMPLATES)
NEWTEMPLATES[0]['DIRS'] = [alt_dir]
with self.settings(TEMPLATES=NEWTEMPLATES, CMS_TEMPLATES=[]):
self.assertCheck(True, warnings=1, errors=0)
def test_non_numeric_site_id(self):
self.assertCheck(True, warnings=0, errors=0)
with self.settings(SITE_ID='broken'):
self.assertCheck(False, warnings=0, errors=1)
class CheckWithDatabaseTests(CheckAssertMixin, TestCase):
def test_check_plugin_instances(self):
        self.assertCheck(True, warnings=0, errors=0)
apps = ["cms", "menus", "sekizai", "cms.test_utils.project.sampleapp", "treebeard"]
with self.settings(INSTALLED_APPS=apps):
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, "LinkPlugin", "en",
name="A Link", url="https://www.django-cms.org")
# create a CMSPlugin with an unsaved instance
instanceless_plugin = CMSPlugin(language="en", plugin_type="TextPlugin")
instanceless_plugin.save()
self.assertCheck(False, warnings=0, errors=2)
# create a bogus CMSPlugin to simulate one which used to exist but
# is no longer installed
bogus_plugin = CMSPlugin(language="en", plugin_type="BogusPlugin")
bogus_plugin.save()
self.assertCheck(False, warnings=0, errors=3)
| bsd-3-clause |
vmindru/ansible | lib/ansible/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py | 31 | 2619 | #!/usr/bin/python
# Copyright: (c) 2018, Juergen Wiebe <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: utm_network_interface_address_info
author:
- Juergen Wiebe (@steamx)
short_description: Get info for a network/interface_address object
description:
- Get info for a network/interface_address object in SOPHOS UTM.
version_added: "2.8"
options:
name:
description:
- The name of the object. Will be used to identify the entry
required: true
extends_documentation_fragment:
- utm
"""
EXAMPLES = """
- name: utm network interface address
utm_proxy_interface_address_info:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestNetworkInterfaceAddress
"""
RETURN = """
result:
description: The utm object that was created
returned: success
type: complex
contains:
_ref:
description: The reference name of the object
type: string
_locked:
description: Whether or not the object is currently locked
type: boolean
_type:
description: The type of the object
type: string
name:
description: The name of the object
type: string
address:
description: The ip4 address of the network/interface_address object
type: string
address6:
description: The ip6 address of the network/interface_address object
type: string
comment:
description: The comment string
type: string
resolved:
description: Whether or not the object is resolved
type: boolean
resolved6:
description: Whether or not the object is resolved
type: boolean
"""
from ansible.module_utils.utm_utils import UTM, UTMModule
from ansible.module_utils._text import to_native
def main():
endpoint = "network/interface_address"
key_to_check_for_changes = []
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True)
)
)
try:
UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
enigmampc/catalyst | catalyst/data/continuous_future_reader.py | 1 | 12198 | import numpy as np
import pandas as pd
from catalyst.data.session_bars import SessionBarReader
class ContinuousFutureSessionBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
fields : list of str
'sid'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
rolls_by_asset = {}
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol, start_date, end_date, asset.offset)
num_sessions = len(
self.trading_calendar.sessions_in_range(start_date, end_date))
shape = num_sessions, len(assets)
results = []
tc = self._bar_reader.trading_calendar
sessions = tc.sessions_in_range(start_date, end_date)
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = sessions.get_loc(start)
if roll_date is not None:
end = roll_date - sessions.freq
end_loc = sessions.get_loc(end)
else:
end = end_date
end_loc = len(sessions) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start = sessions[end_loc + 1]
for column in columns:
if column != 'volume' and column != 'sid':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.int64)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the catalyst.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
        continuous_future : ContinuousFuture
            The continuous future for which to retrieve the value.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
The OHLVC name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading calendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = (rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset))
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : catalyst.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
"""
Returns
-------
sessions : DatetimeIndex
            All session labels (unioning the range for all assets) which the
reader can provide.
"""
return self._bar_reader.sessions
class ContinuousFutureMinuteBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
rolls_by_asset = {}
tc = self.trading_calendar
start_session = tc.minute_to_session_label(start_date)
end_session = tc.minute_to_session_label(end_date)
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol,
start_session,
end_session, asset.offset)
sessions = tc.sessions_in_range(start_date, end_date)
minutes = tc.minutes_in_range(start_date, end_date)
num_minutes = len(minutes)
shape = num_minutes, len(assets)
results = []
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = minutes.searchsorted(start)
if roll_date is not None:
_, end = tc.open_and_close_for_session(
roll_date - sessions.freq)
end_loc = minutes.searchsorted(end)
else:
end = end_date
end_loc = len(minutes) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start, _ = tc.open_and_close_for_session(
tc.minute_to_session_label(minutes[end_loc + 1]))
for column in columns:
if column != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the catalyst.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
        continuous_future : ContinuousFuture
            The continuous future for which to retrieve the value.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
The OHLVC name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading calendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = (rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset))
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : catalyst.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
return self._bar_reader.sessions
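# Hedged usage sketch (not part of the original module): the readers above
# stitch per-contract bars into one continuous series. All names below are
# illustrative; `daily_bar_reader`, `roll_finder` and `cf` are assumed to
# exist in the calling code.
# reader = ContinuousFutureSessionBarReader(daily_bar_reader,
#                                           {'volume': roll_finder})
# close, = reader.load_raw_arrays(['close'], start_date, end_date, [cf])
# close.shape == (number of sessions in range, 1)  # one column per asset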
| apache-2.0 |
NaturalGIS/naturalgis_qgis | python/plugins/processing/gui/BatchAlgorithmDialog.py | 25 | 11235 | # -*- coding: utf-8 -*-
"""
***************************************************************************
BatchAlgorithmDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
from pprint import pformat
import time
from qgis.PyQt.QtWidgets import QPushButton, QDialogButtonBox
from qgis.PyQt.QtCore import Qt, QCoreApplication
from qgis.core import (QgsProcessingOutputHtml,
QgsProcessingOutputNumber,
QgsProcessingOutputString,
QgsProcessingOutputBoolean,
QgsProject,
QgsProcessingMultiStepFeedback,
QgsScopedProxyProgressTask,
QgsProcessingException)
from qgis.gui import QgsProcessingAlgorithmDialogBase
from qgis.utils import OverrideCursor, iface
from processing.gui.BatchPanel import BatchPanel
from processing.gui.AlgorithmExecutor import execute
from processing.gui.Postprocessing import handleAlgorithmResults
from processing.core.ProcessingResults import resultsList
from processing.tools.system import getTempFilename
from processing.tools import dataobjects
import codecs
class BatchFeedback(QgsProcessingMultiStepFeedback):
def __init__(self, steps, feedback):
super().__init__(steps, feedback)
self.errors = []
def reportError(self, error: str, fatalError: bool = False):
self.errors.append(error)
super().reportError(error, fatalError)
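# Hedged sketch (not part of the original module): BatchFeedback forwards
# progress across the batch steps and accumulates error strings so each batch
# row can be reported individually; `parent_feedback` is assumed to exist.
# fb = BatchFeedback(3, parent_feedback)
# fb.setCurrentStep(0)
# fb.reportError('row 1 failed')
# fb.errors  # -> ['row 1 failed']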
class BatchAlgorithmDialog(QgsProcessingAlgorithmDialogBase):
def __init__(self, alg, parent=None):
super().__init__(parent)
self.setAlgorithm(alg)
self.setWindowTitle(self.tr('Batch Processing - {0}').format(self.algorithm().displayName()))
self.setMainWidget(BatchPanel(self, self.algorithm()))
self.hideShortHelp()
self.btnRunSingle = QPushButton(QCoreApplication.translate('BatchAlgorithmDialog', "Run as Single Process…"))
self.btnRunSingle.clicked.connect(self.runAsSingle)
self.buttonBox().addButton(self.btnRunSingle, QDialogButtonBox.ResetRole) # reset role to ensure left alignment
self.updateRunButtonVisibility()
def runAsSingle(self):
self.close()
from processing.gui.AlgorithmDialog import AlgorithmDialog
dlg = AlgorithmDialog(self.algorithm().create(), parent=iface.mainWindow())
dlg.show()
dlg.exec_()
def resetAdditionalGui(self):
self.btnRunSingle.setEnabled(True)
def blockAdditionalControlsWhileRunning(self):
self.btnRunSingle.setEnabled(False)
def runAlgorithm(self):
alg_parameters = []
feedback = self.createFeedback()
load_layers = self.mainWidget().checkLoadLayersOnCompletion.isChecked()
project = QgsProject.instance() if load_layers else None
for row in range(self.mainWidget().batchRowCount()):
parameters, ok = self.mainWidget().parametersForRow(row, destinationProject=project, warnOnInvalid=True)
if ok:
alg_parameters.append(parameters)
if not alg_parameters:
return
task = QgsScopedProxyProgressTask(self.tr('Batch Processing - {0}').format(self.algorithm().displayName()))
multi_feedback = BatchFeedback(len(alg_parameters), feedback)
feedback.progressChanged.connect(task.setProgress)
algorithm_results = []
errors = []
with OverrideCursor(Qt.WaitCursor):
self.blockControlsWhileRunning()
self.setExecutedAnyResult(True)
self.cancelButton().setEnabled(True)
# Make sure the Log tab is visible before executing the algorithm
try:
self.showLog()
self.repaint()
except Exception: # FIXME which one?
pass
start_time = time.time()
for count, parameters in enumerate(alg_parameters):
if feedback.isCanceled():
break
self.setProgressText(
QCoreApplication.translate('BatchAlgorithmDialog', '\nProcessing algorithm {0}/{1}…').format(
count + 1, len(alg_parameters)))
self.setInfo(self.tr('<b>Algorithm {0} starting…</b>').format(self.algorithm().displayName()),
escapeHtml=False)
multi_feedback.setCurrentStep(count)
parameters = self.algorithm().preprocessParameters(parameters)
feedback.pushInfo(self.tr('Input parameters:'))
feedback.pushCommandInfo(pformat(parameters))
feedback.pushInfo('')
# important - we create a new context for each iteration
# this avoids holding onto resources and layers from earlier iterations,
# and allows batch processing of many more items then is possible
# if we hold on to these layers
context = dataobjects.createContext(feedback)
alg_start_time = time.time()
multi_feedback.errors = []
results, ok = self.algorithm().run(parameters, context, multi_feedback)
if ok:
self.setInfo(
QCoreApplication.translate('BatchAlgorithmDialog', 'Algorithm {0} correctly executed…').format(
self.algorithm().displayName()), escapeHtml=False)
feedback.pushInfo(
self.tr('Execution completed in {0:0.2f} seconds'.format(time.time() - alg_start_time)))
feedback.pushInfo(self.tr('Results:'))
feedback.pushCommandInfo(pformat(results))
feedback.pushInfo('')
algorithm_results.append({'parameters': parameters, 'results': results})
handleAlgorithmResults(self.algorithm(), context, multi_feedback, False, parameters)
else:
err = [e for e in multi_feedback.errors]
self.setInfo(
QCoreApplication.translate('BatchAlgorithmDialog', 'Algorithm {0} failed…').format(
self.algorithm().displayName()), escapeHtml=False)
feedback.reportError(
self.tr('Execution failed after {0:0.2f} seconds'.format(time.time() - alg_start_time)),
fatalError=False)
errors.append({'parameters': parameters, 'errors': err})
feedback.pushInfo(self.tr('Batch execution completed in {0:0.2f} seconds'.format(time.time() - start_time)))
if errors:
feedback.reportError(self.tr('{} executions failed. See log for further details.').format(len(errors)), fatalError=True)
task = None
self.finish(algorithm_results, errors)
self.cancelButton().setEnabled(False)
def finish(self, algorithm_results, errors):
for count, results in enumerate(algorithm_results):
self.loadHTMLResults(results['results'], count)
self.createSummaryTable(algorithm_results, errors)
self.resetGui()
def loadHTMLResults(self, results, num):
for out in self.algorithm().outputDefinitions():
if isinstance(out, QgsProcessingOutputHtml) and out.name() in results and results[out.name()]:
resultsList.addResult(icon=self.algorithm().icon(), name='{} [{}]'.format(out.description(), num),
result=results[out.name()])
def createSummaryTable(self, algorithm_results, errors):
createTable = False
for out in self.algorithm().outputDefinitions():
if isinstance(out, (QgsProcessingOutputNumber, QgsProcessingOutputString, QgsProcessingOutputBoolean)):
createTable = True
break
if not createTable and not errors:
return
outputFile = getTempFilename('html')
with codecs.open(outputFile, 'w', encoding='utf-8') as f:
if createTable:
for i, res in enumerate(algorithm_results):
results = res['results']
params = res['parameters']
if i > 0:
f.write('<hr>\n')
f.write(self.tr('<h3>Parameters</h3>\n'))
f.write('<table>\n')
for param in self.algorithm().parameterDefinitions():
if not param.isDestination():
if param.name() in params:
f.write('<tr><th>{}</th><td>{}</td></tr>\n'.format(param.description(),
params[param.name()]))
f.write('</table>\n')
f.write(self.tr('<h3>Results</h3>\n'))
f.write('<table>\n')
for out in self.algorithm().outputDefinitions():
if out.name() in results:
f.write('<tr><th>{}</th><td>{}</td></tr>\n'.format(out.description(), results[out.name()]))
f.write('</table>\n')
if errors:
f.write('<h2 style="color: red">{}</h2>\n'.format(self.tr('Errors')))
for i, res in enumerate(errors):
errors = res['errors']
params = res['parameters']
if i > 0:
f.write('<hr>\n')
f.write(self.tr('<h3>Parameters</h3>\n'))
f.write('<table>\n')
for param in self.algorithm().parameterDefinitions():
if not param.isDestination():
if param.name() in params:
f.write(
'<tr><th>{}</th><td>{}</td></tr>\n'.format(param.description(), params[param.name()]))
f.write('</table>\n')
f.write('<h3>{}</h3>\n'.format(self.tr('Error')))
f.write('<p style="color: red">{}</p>\n'.format('<br>'.join(errors)))
resultsList.addResult(icon=self.algorithm().icon(),
name='{} [summary]'.format(self.algorithm().name()), timestamp=time.localtime(),
result=outputFile)
| gpl-2.0 |
vfasky/BlogCatke | virtualenv.bundle/requests/packages/chardet2/jpcntx.py | 25 | 19197 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
NUM_OF_CATEGORY = 6
DONT_KNOW = -1
ENOUGH_REL_THRESHOLD = 100
MAX_REL_THRESHOLD = 1000
MINIMUM_DATA_THRESHOLD = 4
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis:
def __init__(self):
self.reset()
def reset(self):
self._mTotalRel = 0 # total sequence received
        self._mRelSample = [0] * NUM_OF_CATEGORY # category counters, each integer counts sequences in its category
        self._mNeedToSkipCharNum = 0 # if the last byte in the current buffer is not the last byte of a character, we need to know how many bytes to skip in the next buffer
self._mLastCharOrder = -1 # The order of previous char
self._mDone = False # If this flag is set to True, detection is done and conclusion has been made
def feed(self, aBuf, aLen):
if self._mDone: return
        # The buffer we got is byte oriented, and a character may span more
        # than one buffer. If the last one or two bytes of the previous buffer
        # are not a complete character, we record how many bytes are needed to
        # complete it and skip those bytes here. We could also record those
        # bytes and analyse the character once it is complete, but since one
        # character will not make much difference, simply skipping it
        # simplifies our logic and improves performance.
i = self._mNeedToSkipCharNum
while i < aLen:
order, charLen = self.get_order(aBuf[i:i+2])
i += charLen
if i > aLen:
self._mNeedToSkipCharNum = i - aLen
self._mLastCharOrder = -1
else:
if (order != -1) and (self._mLastCharOrder != -1):
self._mTotalRel += 1
if self._mTotalRel > MAX_REL_THRESHOLD:
self._mDone = True
break
self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
self._mLastCharOrder = order
def got_enough_data(self):
return self._mTotalRel > ENOUGH_REL_THRESHOLD
def get_confidence(self):
# This is just one way to calculate confidence. It works well for me.
if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
else:
return DONT_KNOW
def get_order(self, aBuf):
return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
def get_order(self, aBuf):
if not aBuf: return -1, 1
# find out current char's byte length
if ((aBuf[0] >= 0x81) and (aBuf[0] <= 0x9F)) or \
((aBuf[0] >= 0xE0) and (aBuf[0] <= 0xFC)):
charLen = 2
else:
charLen = 1
# return its order if it is hiragana
if len(aBuf) > 1:
            # 0x82 is the Shift_JIS lead byte for hiragana; the decimal
            # literal 202 here was a mis-converted octal '\202' (== 0x82)
            if (aBuf[0] == 0x82) and \
(aBuf[1] >= 0x9F) and \
(aBuf[1] <= 0xF1):
return aBuf[1] - 0x9F, charLen
return -1, charLen
class EUCJPContextAnalysis(JapaneseContextAnalysis):
def get_order(self, aBuf):
if not aBuf: return -1, 1
# find out current char's byte length
if (aBuf[0] == 0x8E) or \
((aBuf[0] >= 0xA1) and (aBuf[0] <= 0xFE)):
charLen = 2
elif aBuf[0] == 0x8F:
charLen = 3
else:
charLen = 1
# return its order if it is hiragana
if len(aBuf) > 1:
if (aBuf[0] == 0xA4) and \
(aBuf[1] >= 0xA1) and \
(aBuf[1] <= 0xF3):
return aBuf[1] - 0xA1, charLen
return -1, charLen
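# --- Illustrative usage (editor's sketch, not part of the original module).
# It assumes the module-level constants defined earlier in this file
# (NUM_OF_CATEGORY, MINIMUM_DATA_THRESHOLD, jp2CharContext, ...) and feeds a
# short Shift_JIS byte string; with this little input got_enough_data() stays
# False, so the snippet only demonstrates the call pattern.
if __name__ == '__main__':
    analyser = SJISContextAnalysis()
    sample = b'\x82\xa0\x82\xa2\x82\xa4'  # hiragana "a i u" in Shift_JIS
    analyser.feed(sample, len(sample))
    print(analyser.got_enough_data(), analyser.get_confidence())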
| apache-2.0 |
FreeScienceCommunity/or-tools | examples/python/volsay2.py | 34 | 2029 | # Copyright 2011 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Volsay problem in Google or-tools.
From the OPL model volsay.mod
Using arrays.
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.linear_solver import pywraplp
def main(unused_argv):
# Create the solver.
# using GLPK
solver = pywraplp.Solver('CoinsGridGLPK',
pywraplp.Solver.GLPK_LINEAR_PROGRAMMING)
# Using CLP
# solver = pywraplp.Solver('CoinsGridCLP',
# pywraplp.Solver.CLP_LINEAR_PROGRAMMING)
# data
num_products = 2
Gas = 0
Chloride = 1
products = ['Gas', 'Chloride']
# declare variables
production = [solver.NumVar(0, 100000, 'production[%i]' % i)
for i in range(num_products)]
#
# constraints
#
solver.Add(production[Gas] + production[Chloride] <= 50)
solver.Add(3 * production[Gas] + 4 * production[Chloride] <= 180)
  # objective (Maximize() sets the objective on the solver in place and
  # returns None, so there is no need to bind its result to a name)
  solver.Maximize(40 * production[Gas] + 50 * production[Chloride])
print 'NumConstraints:', solver.NumConstraints()
#
# solution and search
#
solver.Solve()
print
print 'objective = ', solver.Objective().Value()
for i in range(num_products):
print products[i], '=', production[i].SolutionValue(),
print 'ReducedCost = ', production[i].ReducedCost()
if __name__ == '__main__':
main('Volsay')
| apache-2.0 |
mandeepdhami/nova | nova/tests/functional/v3/test_pci.py | 24 | 7238 | # Copyright 2013 Intel.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_serialization import jsonutils
import testtools
from nova import db
from nova import objects
from nova.objects import pci_device_pool
from nova.tests.functional.v3 import api_sample_base
from nova.tests.functional.v3 import test_servers
skip_msg = "Bug 1426241"
fake_db_dev_1 = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 1,
'compute_node_id': 1,
'address': '0000:04:10.0',
'vendor_id': '8086',
'numa_node': 0,
'product_id': '1520',
'dev_type': 'type-VF',
'status': 'available',
'dev_id': 'pci_0000_04_10_0',
'label': 'label_8086_1520',
'instance_uuid': '69ba1044-0766-4ec0-b60d-09595de034a1',
'request_id': None,
'extra_info': '{"key1": "value1", "key2": "value2"}'
}
fake_db_dev_2 = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 2,
'compute_node_id': 1,
'address': '0000:04:10.1',
'vendor_id': '8086',
'numa_node': 1,
'product_id': '1520',
'dev_type': 'type-VF',
'status': 'available',
'dev_id': 'pci_0000_04_10_1',
'label': 'label_8086_1520',
'instance_uuid': 'd5b446a6-a1b4-4d01-b4f0-eac37b3a62fc',
'request_id': None,
'extra_info': '{"key3": "value3", "key4": "value4"}'
}
class ExtendedServerPciSampleJsonTest(test_servers.ServersSampleBase):
extension_name = "os-pci"
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-get-resp', subs, response, 200)
def test_detail(self):
self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('servers-detail-resp', subs, response, 200)
class ExtendedHyervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extra_extensions_to_load = ['os-hypervisors']
extension_name = 'os-pci'
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
cpu_info = collections.OrderedDict([
('arch', 'x86_64'),
('model', 'Nehalem'),
('vendor', 'Intel'),
('features', ['pge', 'clflush']),
('topology', {
'cores': 1,
'threads': 1,
'sockets': 4,
}),
])
self.fake_compute_node = objects.ComputeNode(
cpu_info=jsonutils.dumps(cpu_info),
current_workload=0,
disk_available_least=0,
host_ip="1.1.1.1",
state="up",
status="enabled",
free_disk_gb=1028,
free_ram_mb=7680,
hypervisor_hostname="fake-mini",
hypervisor_type="fake",
hypervisor_version=1000,
id=1,
local_gb=1028,
local_gb_used=0,
memory_mb=8192,
memory_mb_used=512,
running_vms=0,
vcpus=1,
vcpus_used=0,
service_id=2,
host='043b3cacf6f34c90a7245151fc8ebcda',
pci_device_pools=pci_device_pool.from_pci_stats(
{"count": 5,
"vendor_id": "8086",
"product_id": "1520",
"keya": "valuea",
"key1": "value1",
"numa_node": 1}),)
self.fake_service = objects.Service(
id=2,
host='043b3cacf6f34c90a7245151fc8ebcda',
disabled=False,
disabled_reason=None)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
@mock.patch("nova.objects.Service.get_by_compute_host")
@mock.patch("nova.objects.ComputeNode.get_by_id")
def test_pci_show(self, mock_obj, mock_svc_get, mock_service):
mock_obj.return_value = self.fake_compute_node
mock_svc_get.return_value = self.fake_service
hypervisor_id = 1
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
}
subs.update(self._get_regexes())
self._verify_response('hypervisors-pci-show-resp',
subs, response, 200)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
@mock.patch("nova.objects.Service.get_by_compute_host")
@mock.patch("nova.objects.ComputeNodeList.get_all")
def test_pci_detail(self, mock_obj, mock_svc_get, mock_service):
mock_obj.return_value = [self.fake_compute_node]
mock_svc_get.return_value = self.fake_service
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
}
response = self._do_get('os-hypervisors/detail')
subs.update(self._get_regexes())
self._verify_response('hypervisors-pci-detail-resp',
subs, response, 200)
class PciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-pci"
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
def _fake_pci_device_get_by_id(self, context, id):
return fake_db_dev_1
def _fake_pci_device_get_all_by_node(self, context, id):
return [fake_db_dev_1, fake_db_dev_2]
def test_pci_show(self):
self.stubs.Set(db, 'pci_device_get_by_id',
self._fake_pci_device_get_by_id)
response = self._do_get('os-pci/1')
subs = self._get_regexes()
self._verify_response('pci-show-resp', subs, response, 200)
def test_pci_index(self):
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
response = self._do_get('os-pci')
subs = self._get_regexes()
self._verify_response('pci-index-resp', subs, response, 200)
def test_pci_detail(self):
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
response = self._do_get('os-pci/detail')
subs = self._get_regexes()
self._verify_response('pci-detail-resp', subs, response, 200)
| apache-2.0 |
hbrunn/OCB | openerp/service/model.py | 62 | 8421 | # -*- coding: utf-8 -*-
from functools import wraps
import logging
from psycopg2 import IntegrityError, OperationalError, errorcodes
import random
import threading
import time
import openerp
from openerp.tools.translate import translate
from openerp.osv.orm import except_orm
from contextlib import contextmanager
import security
_logger = logging.getLogger(__name__)
PG_CONCURRENCY_ERRORS_TO_RETRY = (errorcodes.LOCK_NOT_AVAILABLE, errorcodes.SERIALIZATION_FAILURE, errorcodes.DEADLOCK_DETECTED)
MAX_TRIES_ON_CONCURRENCY_FAILURE = 5
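# check() below retries these transient concurrency errors with randomized
# exponential backoff: before retry number n it sleeps a uniform random time
# in [0, 2**n) seconds, giving up after MAX_TRIES_ON_CONCURRENCY_FAILURE
# attempts (see the OperationalError handler in the wrapper).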
def dispatch(method, params):
(db, uid, passwd ) = params[0:3]
# set uid tracker - cleaned up at the WSGI
# dispatching phase in openerp.service.wsgi_server.application
threading.current_thread().uid = uid
params = params[3:]
if method == 'obj_list':
raise NameError("obj_list has been discontinued via RPC as of 6.0, please query ir.model directly!")
if method not in ['execute', 'execute_kw', 'exec_workflow']:
raise NameError("Method not available %s" % method)
security.check(db,uid,passwd)
openerp.modules.registry.RegistryManager.check_registry_signaling(db)
fn = globals()[method]
res = fn(db, uid, *params)
openerp.modules.registry.RegistryManager.signal_caches_change(db)
return res
def check(f):
@wraps(f)
def wrapper(___dbname, *args, **kwargs):
""" Wraps around OSV functions and normalises a few exceptions
"""
        dbname = ___dbname # NOTE: this forbids using "___dbname" as an argument name in http routes
def tr(src, ttype):
            # We try to do the same as the _() function, but without the frame
            # inspection, since we already are wrapping an osv function
# trans_obj = self.get('ir.translation') cannot work yet :(
ctx = {}
if not kwargs:
if args and isinstance(args[-1], dict):
ctx = args[-1]
elif isinstance(kwargs, dict):
if 'context' in kwargs:
ctx = kwargs['context']
elif 'kwargs' in kwargs:
# http entry points such as call_kw()
ctx = kwargs['kwargs'].get('context')
uid = 1
if args and isinstance(args[0], (long, int)):
uid = args[0]
lang = ctx and ctx.get('lang')
if not (lang or hasattr(src, '__call__')):
return src
# We open a *new* cursor here, one reason is that failed SQL
# queries (as in IntegrityError) will invalidate the current one.
cr = False
if hasattr(src, '__call__'):
# callable. We need to find the right parameters to call
# the orm._sql_message(self, cr, uid, ids, context) function,
# or we skip..
# our signature is f(registry, dbname [,uid, obj, method, args])
try:
if args and len(args) > 1:
                    # TODO self doesn't exist, but this was already wrong before (it was not a registry but just the object_service)
obj = self.get(args[1])
if len(args) > 3 and isinstance(args[3], (long, int, list)):
ids = args[3]
else:
ids = []
cr = openerp.sql_db.db_connect(dbname).cursor()
return src(obj, cr, uid, ids, context=(ctx or {}))
except Exception:
pass
finally:
if cr: cr.close()
return False # so that the original SQL error will
# be returned, it is the best we have.
try:
cr = openerp.sql_db.db_connect(dbname).cursor()
res = translate(cr, name=False, source_type=ttype,
lang=lang, source=src)
if res:
return res
else:
return src
finally:
if cr: cr.close()
def _(src):
return tr(src, 'code')
tries = 0
while True:
try:
if openerp.registry(dbname)._init and not openerp.tools.config['test_enable']:
raise openerp.exceptions.Warning('Currently, this database is not fully loaded and can not be used.')
return f(dbname, *args, **kwargs)
except OperationalError, e:
# Automatically retry the typical transaction serialization errors
if e.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
raise
if tries >= MAX_TRIES_ON_CONCURRENCY_FAILURE:
_logger.warning("%s, maximum number of tries reached" % errorcodes.lookup(e.pgcode))
raise
wait_time = random.uniform(0.0, 2 ** tries)
tries += 1
_logger.info("%s, retry %d/%d in %.04f sec..." % (errorcodes.lookup(e.pgcode), tries, MAX_TRIES_ON_CONCURRENCY_FAILURE, wait_time))
time.sleep(wait_time)
except IntegrityError, inst:
registry = openerp.registry(dbname)
for key in registry._sql_error.keys():
if key in inst[0]:
raise openerp.osv.orm.except_orm(_('Constraint Error'), tr(registry._sql_error[key], 'sql_constraint') or inst[0])
if inst.pgcode in (errorcodes.NOT_NULL_VIOLATION, errorcodes.FOREIGN_KEY_VIOLATION, errorcodes.RESTRICT_VIOLATION):
msg = _('The operation cannot be completed, probably due to the following:\n- deletion: you may be trying to delete a record while other records still reference it\n- creation/update: a mandatory field is not correctly set')
_logger.debug("IntegrityError", exc_info=True)
try:
errortxt = inst.pgerror.replace('«','"').replace('»','"')
if '"public".' in errortxt:
context = errortxt.split('"public".')[1]
model_name = table = context.split('"')[1]
else:
last_quote_end = errortxt.rfind('"')
last_quote_begin = errortxt.rfind('"', 0, last_quote_end)
model_name = table = errortxt[last_quote_begin+1:last_quote_end].strip()
model = table.replace("_",".")
if model in registry:
model_obj = registry[model]
model_name = model_obj._description or model_obj._name
msg += _('\n\n[object with reference: %s - %s]') % (model_name, model)
except Exception:
pass
raise openerp.osv.orm.except_orm(_('Integrity Error'), msg)
else:
raise openerp.osv.orm.except_orm(_('Integrity Error'), inst[0])
return wrapper
def execute_cr(cr, uid, obj, method, *args, **kw):
object = openerp.registry(cr.dbname).get(obj)
if object is None:
raise except_orm('Object Error', "Object %s doesn't exist" % obj)
return getattr(object, method)(cr, uid, *args, **kw)
def execute_kw(db, uid, obj, method, args, kw=None):
return execute(db, uid, obj, method, *args, **kw or {})
@check
def execute(db, uid, obj, method, *args, **kw):
threading.currentThread().dbname = db
with openerp.registry(db).cursor() as cr:
if method.startswith('_'):
raise except_orm('Access Denied', 'Private methods (such as %s) cannot be called remotely.' % (method,))
res = execute_cr(cr, uid, obj, method, *args, **kw)
if res is None:
_logger.warning('The method %s of the object %s can not return `None` !', method, obj)
return res
def exec_workflow_cr(cr, uid, obj, signal, *args):
res_id = args[0]
return execute_cr(cr, uid, obj, 'signal_workflow', [res_id], signal)[res_id]
@check
def exec_workflow(db, uid, obj, signal, *args):
with openerp.registry(db).cursor() as cr:
return exec_workflow_cr(cr, uid, obj, signal, *args)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ocaisa/easybuild-framework | easybuild/toolchains/iompi.py | 5 | 1519 | ##
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for iompi compiler toolchain (includes Intel compilers (icc, ifort) and OpenMPI).
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.toolchains.compiler.inteliccifort import IntelIccIfort
from easybuild.toolchains.mpi.openmpi import OpenMPI
class Iompi(IntelIccIfort, OpenMPI):
"""
Compiler toolchain with Intel compilers (icc/ifort) and OpenMPI.
"""
NAME = 'iompi'
| gpl-2.0 |
shadyueh/pyranking | env/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py | 252 | 106466 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
PY3 = sys.version_info > (3,)
PY2 = not PY3
if PY3:
from urllib.parse import urlparse, urlunparse
if PY2:
from urlparse import urlparse, urlunparse
if PY3:
string_types = str,
else:
string_types = str, eval('unicode')
iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems()
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib.machinery as importlib_machinery
else:
importlib_machinery = None
try:
import parser
except ImportError:
pass
import pip._vendor.packaging.version
import pip._vendor.packaging.specifiers
packaging = pip._vendor.packaging
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*'+part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. In most cases, conversion to a tuple is unnecessary. For "
"comparison of versions, sort the Version instances directly. If "
"you have another use case requiring the tuple, please file a "
"bug with the setuptools project describing that need.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
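# Illustrative behaviour (editor's examples; comparisons follow PEP 440 via
# the vendored `packaging` library):
#
#   parse_version('1.0') < parse_version('1.0.post1')   # True
#   parse_version('1.0.dev1') < parse_version('1.0')    # True
#   parse_version('not.a.version')   # falls back to SetuptoolsLegacyVersion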
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
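# For example (hypothetical platform strings, exercising the macosx rules
# above):
#   compatible_platforms('macosx-10.3-ppc', 'macosx-10.4-ppc')  # True
#   compatible_platforms('macosx-10.5-ppc', 'macosx-10.4-ppc')  # False
#   compatible_platforms(None, 'linux-x86_64')                  # True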
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
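# e.g. (hypothetical distribution and entry point names):
#   main = load_entry_point('mypkg>=1.0', 'console_scripts', 'mytool')
#   ep = get_entry_info('mypkg', 'console_scripts', 'mytool')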
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
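    # e.g. (hypothetical group name):
    #   for ep in working_set.iter_entry_points('console_scripts'):
    #       tool = ep.load()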
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
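    # Usage sketch (editor's note; the package and resource names are
    # hypothetical):
    #
    #     manager = ResourceManager()
    #     if manager.resource_exists('mypkg', 'data/config.json'):
    #         raw = manager.resource_string('mypkg', 'data/config.json')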
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
    if os.name != 'nt':
return os.path.expanduser('~/.python-eggs')
# XXX this may be locale-specific!
app_data = 'Application Data'
app_homes = [
# best option, should be locale-safe
(('APPDATA',), None),
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
# 95/98/ME
(('WINDIR',), app_data),
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname, subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
        version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
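# Quick illustration of the sanitizers above (editor's note):
#
#     >>> safe_name('The $$$ Tree')
#     'The-Tree'
#     >>> safe_extra('Extra Feature')
#     'extra_feature'
#     >>> to_filename('my-project')
#     'my_project'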
class MarkerEvaluation(object):
values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': platform.python_version,
'python_version': lambda: platform.python_version()[:3],
'platform_version': platform.version,
'platform_machine': platform.machine,
'python_implementation': platform.python_implementation,
}
@classmethod
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError as e:
return cls.normalize_exception(e)
return False
@staticmethod
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error
message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error
messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
@classmethod
def and_test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.and_, items)
@classmethod
def test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.or_, items)
@classmethod
def atom(cls, nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return cls.interpret(nodelist[2])
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@classmethod
def comparison(cls, nodelist):
if len(nodelist) > 4:
msg = "Chained comparison not allowed in environment markers"
raise SyntaxError(msg)
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == token.NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = cls.get_op(cop)
except KeyError:
msg = repr(cop) + " operator not allowed in environment markers"
raise SyntaxError(msg)
return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
@classmethod
def get_op(cls, op):
ops = {
symbol.test: cls.test,
symbol.and_test: cls.and_test,
symbol.atom: cls.atom,
symbol.comparison: cls.comparison,
'not in': lambda x, y: x not in y,
'in': lambda x, y: x in y,
'==': operator.eq,
'!=': operator.ne,
'<': operator.lt,
'>': operator.gt,
'<=': operator.le,
'>=': operator.ge,
}
if hasattr(symbol, 'or_test'):
ops[symbol.or_test] = cls.test
return ops[op]
@classmethod
def evaluate_marker(cls, text, extra=None):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
        This implementation uses the 'parser' module, which is not
        implemented on Jython and has been superseded by the 'ast' module
        in Python 2.6 and later.
"""
return cls.interpret(parser.expr(text).totuple(1)[1])
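    # Usage sketch (editor's note):
    #
    #     MarkerEvaluation.evaluate_marker('os_name == "posix"')
    #     # -> True on POSIX systems, False elsewhere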
@classmethod
def _markerlib_evaluate(cls, text):
"""
Evaluate a PEP 426 environment marker using markerlib.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
"""
from pip._vendor import _markerlib
# markerlib implements Metadata 1.2 (PEP 345) environment markers.
# Translate the variables to Metadata 2.0 (PEP 426).
env = _markerlib.default_environment()
for key in env.keys():
new_key = key.replace('.', '_')
env[new_key] = env.pop(key)
try:
result = _markerlib.interpret(text, env)
except NameError as e:
raise SyntaxError(e.args[0])
return result
if 'parser' not in globals():
# Fall back to less-complete _markerlib implementation if 'parser' module
# is not available.
evaluate_marker = _markerlib_evaluate
@classmethod
def interpret(cls, nodelist):
        while len(nodelist) == 2:
            nodelist = nodelist[1]
try:
op = cls.get_op(nodelist[0])
except KeyError:
raise SyntaxError("Comparison or logical expression expected")
return op(nodelist)
@classmethod
def evaluate(cls, nodelist):
        while len(nodelist) == 2:
            nodelist = nodelist[1]
        kind = nodelist[0]
        name = nodelist[1]
        if kind == token.NAME:
            try:
                op = cls.values[name]
            except KeyError:
                raise SyntaxError("Unknown name %r" % name)
            return op()
        if kind == token.STRING:
s = nodelist[1]
if not cls._safe_string(s):
raise SyntaxError(
"Only plain strings allowed in environment markers")
return s[1:-1]
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@staticmethod
def _safe_string(cand):
return (
cand[:1] in "'\"" and
not cand.startswith('"""') and
not cand.startswith("'''") and
'\\' not in cand
)
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
    # Python 2 returns metadata as a byte string; Python 3 decodes it below
    if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename,'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
        while path != old:
if path.lower().endswith('.egg'):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
register_loader_type(type(None), DefaultProvider)
if importlib_machinery is not None:
register_loader_type(importlib_machinery.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre+zip_path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name=='nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size!=size or stat.st_mtime!=timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
        return name == 'PKG-INFO'
    def get_metadata(self, name):
        if name == 'PKG-INFO':
            with open(self.path, 'rU') as f:
metadata = f.read()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
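# Usage sketch (editor's note; the path is illustrative):
#
#     for dist in find_distributions('/usr/lib/python2.7/site-packages'):
#         print(dist.project_name, dist.version)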
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if path_item.lower().endswith('.egg'):
# unpacked egg
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and lower.endswith('.egg'):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if importlib_machinery is not None:
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
for path_item in path:
if path_item not in module.__path__:
module.__path__.append(path_item)
return subpath
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
        _namespace_packages.setdefault(parent, []).append(packageName)
        _namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if importlib_machinery is not None:
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
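# Quick illustration (editor's note):
#
#     >>> list(yield_lines("foo\n# comment\n\nbar"))
#     ['foo', 'bar']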
# whitespace and comment
LINE_END = re.compile(r"\s*(#.*)?$").match
# line continuation
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match
# Distribution or extra
DISTRO = re.compile(r"\s*((\w|[-.])+)").match
# ver. info
VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match
# comma between items
COMMA = re.compile(r"\s*,").match
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
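# Quick illustration of EGG_NAME (editor's note):
#
#     >>> m = EGG_NAME('FooBar-1.2-py2.7-linux_x86_64')
#     >>> m.group('name'), m.group('ver'), m.group('pyver'), m.group('plat')
#     ('FooBar', '1.2', '2.7', 'linux_x86_64')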
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
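    # Usage sketch (editor's note; names are illustrative):
    #
    #     ep = EntryPoint.parse('serve = mypkg.cli:main [ssl]')
    #     ep.name         # 'serve'
    #     ep.module_name  # 'mypkg.cli'
    #     ep.attrs        # ('main',)
    #     ep.extras       # ('ssl',)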
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
    def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
# .dist-info gets much metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
                            reqs = []
                        elif not evaluate_marker(marker):
                            reqs = []
                    extra = safe_extra(extra) or None
                dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc=None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
        npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
break
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
    def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
necessary, and remove parenthesis.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from pip._vendor._markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = next(parse_requirements(distvers))
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': Distribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
items = []
while not TERMINATOR(line, p):
if CONTINUE(line, p):
try:
line = next(lines)
p = 0
except StopIteration:
msg = "\\ must not appear on the last nonblank line"
raise RequirementParseError(msg)
match = ITEM(line, p)
if not match:
msg = "Expected " + item_name + " in"
raise RequirementParseError(msg, line, "at", line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line, p)
if match:
# skip the comma
p = match.end()
elif not TERMINATOR(line, p):
msg = "Expected ',' or end-of-list in"
raise RequirementParseError(msg, line, "at", line[p:])
match = TERMINATOR(line, p)
# skip the terminator, if any
if match:
p = match.end()
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise RequirementParseError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line, p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
"version spec")
specs = [(op, val) for op, val in specs]
yield Requirement(project_name, specs, extras)
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
self.specifier = packaging.specifiers.SpecifierSet(
",".join(["".join([x, y]) for x, y in specs])
)
self.specs = specs
self.extras = tuple(map(safe_extra, extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
)
self.__hash = hash(self.hashCmp)
def __str__(self):
extras = ','.join(self.extras)
if extras:
extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, self.specifier)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs) == 1:
return reqs[0]
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
def _get_mro(cls):
    """Get an mro for a type or classic class"""
    if not isinstance(cls, type):
        class cls(cls, object):
            pass
        return cls.__mro__[1:]
    return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
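# Quick illustration (editor's note):
#
#     >>> list(split_sections("a\n[sec]\nb\nc"))
#     [(None, ['a']), ('sec', ['b', 'c'])]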
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args,**kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
for name in dir(manager):
if not name.startswith('_'):
g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
    working_set.entries = []
    # re-add entries so they match sys.path order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
| mit |
atplanet/ansible-modules-extras | packaging/os/pkg5.py | 75 | 4862 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Peter Oliver <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: pkg5
author: "Peter Oliver (@mavit)"
short_description: Manages packages with the Solaris 11 Image Packaging System
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
notes:
- The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
options:
name:
description:
- An FRMI of the package(s) to be installed/removed/updated.
- Multiple packages may be specified, separated by C(,).
required: true
state:
description:
- Whether to install (I(present), I(latest)), or remove (I(absent)) a
package.
required: false
default: present
choices: [ present, latest, absent ]
accept_licenses:
description:
- Accept any licences.
required: false
default: false
choices: [ true, false ]
aliases: [ accept_licences, accept ]
'''
EXAMPLES = '''
# Install Vim:
- pkg5: name=editor/vim
# Remove finger daemon:
- pkg5: name=service/network/finger state=absent
# Install several packages at once:
- pkg5:
name:
- /file/gnu-findutils
- /text/gnu-grep
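# Keep a package at the latest available version (editor's addition; the
# 'latest' state is accepted by this module):
- pkg5: name=editor/vim state=latest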
'''
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='list'),
state=dict(
default='present',
choices=[
'present',
'installed',
'latest',
'absent',
'uninstalled',
'removed',
]
),
accept_licenses=dict(
type='bool',
default=False,
aliases=['accept_licences', 'accept'],
),
)
)
params = module.params
packages = []
# pkg(5) FRMIs include a comma before the release number, but
# AnsibleModule will have split this into multiple items for us.
# Try to spot where this has happened and fix it.
for fragment in params['name']:
if (
                re.search(r'^\d+(?:\.\d+)*', fragment)
                and packages and re.search(r'@[^,]*$', packages[-1])
):
packages[-1] += ',' + fragment
else:
packages.append(fragment)
if params['state'] in ['present', 'installed']:
ensure(module, 'present', packages, params)
elif params['state'] in ['latest']:
ensure(module, 'latest', packages, params)
elif params['state'] in ['absent', 'uninstalled', 'removed']:
ensure(module, 'absent', packages, params)
def ensure(module, state, packages, params):
response = {
'results': [],
'msg': '',
}
behaviour = {
'present': {
'filter': lambda p: not is_installed(module, p),
'subcommand': 'install',
},
'latest': {
'filter': lambda p: not is_latest(module, p),
'subcommand': 'install',
},
'absent': {
'filter': lambda p: is_installed(module, p),
'subcommand': 'uninstall',
},
}
if params['accept_licenses']:
accept_licenses = ['--accept']
else:
accept_licenses = []
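    # For example, state=present with accept_licenses=true and
    # packages=['editor/vim'] assembles the command below as:
    #   pkg install --accept -q -- editor/vim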
    to_modify = list(filter(behaviour[state]['filter'], packages))
if to_modify:
rc, out, err = module.run_command(
[
'pkg', behaviour[state]['subcommand']
]
+ accept_licenses
+ [
'-q', '--'
] + to_modify
)
response['rc'] = rc
response['results'].append(out)
response['msg'] += err
response['changed'] = True
if rc != 0:
module.fail_json(**response)
module.exit_json(**response)
def is_installed(module, package):
rc, out, err = module.run_command(['pkg', 'list', '--', package])
return not bool(int(rc))
def is_latest(module, package):
rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
return bool(int(rc))
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
KohlsTechnology/ansible | lib/ansible/plugins/filter/ipaddr.py | 15 | 29588 | # (c) 2014, Maciej Delmanowski <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from functools import partial
import types
try:
import netaddr
except ImportError:
# in this case, we'll make the filters return error messages (see bottom)
netaddr = None
else:
class mac_linux(netaddr.mac_unix):
pass
mac_linux.word_fmt = '%.2x'
from ansible import errors
# ---- IP address and network query helpers ----
def _empty_ipaddr_query(v, vtype):
# We don't have any query to process, so just check what type the user
# expects, and return the IP address in a correct format
if v:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
def _first_last(v):
if v.size == 2:
first_usable = int(netaddr.IPAddress(v.first))
last_usable = int(netaddr.IPAddress(v.last))
return first_usable, last_usable
elif v.size > 1:
first_usable = int(netaddr.IPAddress(v.first + 1))
last_usable = int(netaddr.IPAddress(v.last - 1))
return first_usable, last_usable
def _6to4_query(v, vtype, value):
if v.version == 4:
if v.size == 1:
ipconv = str(v.ip)
elif v.size > 1:
if v.ip != v.network:
ipconv = str(v.ip)
else:
ipconv = False
if ipaddr(ipconv, 'public'):
numbers = list(map(int, ipconv.split('.')))
try:
return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
except:
return False
elif v.version == 6:
if vtype == 'address':
if ipaddr(str(v), '2002::/16'):
return value
elif vtype == 'network':
if v.ip != v.network:
if ipaddr(str(v.ip), '2002::/16'):
return value
else:
return False
def _ip_query(v):
if v.size == 1:
return str(v.ip)
if v.size > 1:
# /31 networks in netaddr have no broadcast address
if v.ip != v.network or not v.broadcast:
return str(v.ip)
def _gateway_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _address_prefix_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _bool_ipaddr_query(v):
if v:
return True
def _broadcast_query(v):
if v.size > 2:
return str(v.broadcast)
def _cidr_query(v):
return str(v)
def _cidr_lookup_query(v, iplist, value):
try:
if v in iplist:
return value
except:
return False
def _first_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size == 2:
return str(netaddr.IPAddress(int(v.network)))
elif v.size > 1:
return str(netaddr.IPAddress(int(v.network) + 1))
def _host_query(v):
if v.size == 1:
return str(v)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _hostmask_query(v):
return str(v.hostmask)
def _int_query(v, vtype):
if vtype == 'address':
return int(v.ip)
elif vtype == 'network':
return str(int(v.ip)) + '/' + str(int(v.prefixlen))
def _ip_prefix_query(v):
if v.size == 2:
return str(v.ip) + '/' + str(v.prefixlen)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _ip_netmask_query(v):
if v.size == 2:
return str(v.ip) + ' ' + str(v.netmask)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + ' ' + str(v.netmask)
'''
def _ip_wildcard_query(v):
if v.size == 2:
return str(v.ip) + ' ' + str(v.hostmask)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + ' ' + str(v.hostmask)
'''
def _ipv4_query(v, value):
if v.version == 6:
try:
return str(v.ipv4())
except:
return False
else:
return value
def _ipv6_query(v, value):
if v.version == 4:
return str(v.ipv6())
else:
return value
def _last_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
return str(netaddr.IPAddress(last_usable))
def _link_local_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v.version == 4:
if ipaddr(str(v_ip), '169.254.0.0/24'):
return value
elif v.version == 6:
if ipaddr(str(v_ip), 'fe80::/10'):
return value
def _loopback_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_loopback():
return value
def _multicast_query(v, value):
if v.is_multicast():
return value
def _net_query(v):
if v.size > 1:
if v.ip == v.network:
return str(v.network) + '/' + str(v.prefixlen)
def _netmask_query(v):
return str(v.netmask)
def _network_query(v):
'''Return the network of a given IP or subnet'''
if v.size > 1:
return str(v.network)
def _network_id_query(v):
'''Return the network of a given IP or subnet'''
return str(v.network)
def _network_netmask_query(v):
return str(v.network) + ' ' + str(v.netmask)
def _network_wildcard_query(v):
return str(v.network) + ' ' + str(v.hostmask)
def _next_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
next_ip = int(netaddr.IPAddress(int(v.ip) + 1))
if next_ip >= first_usable and next_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) + 1))
def _prefix_query(v):
return int(v.prefixlen)
def _previous_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
previous_ip = int(netaddr.IPAddress(int(v.ip) - 1))
if previous_ip >= first_usable and previous_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) - 1))
def _private_query(v, value):
if v.is_private():
return value
def _public_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if (v_ip.is_unicast() and not v_ip.is_private() and
not v_ip.is_loopback() and not v_ip.is_netmask() and
not v_ip.is_hostmask()):
return value
def _range_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
first_usable = str(netaddr.IPAddress(first_usable))
last_usable = str(netaddr.IPAddress(last_usable))
return "{0}-{1}".format(first_usable, last_usable)
def _revdns_query(v):
v_ip = netaddr.IPAddress(str(v.ip))
return v_ip.reverse_dns
def _size_query(v):
return v.size
def _size_usable_query(v):
if v.size == 1:
return 0
elif v.size == 2:
return 2
return v.size - 2
def _subnet_query(v):
return str(v.cidr)
def _type_query(v):
if v.size == 1:
return 'address'
if v.size > 1:
if v.ip != v.network:
return 'address'
else:
return 'network'
def _unicast_query(v, value):
if v.is_unicast():
return value
def _version_query(v):
return v.version
def _wrap_query(v, vtype, value):
if v.version == 6:
if vtype == 'address':
return '[' + str(v.ip) + ']'
elif vtype == 'network':
return '[' + str(v.ip) + ']/' + str(v.prefixlen)
else:
return value
# ---- HWaddr query helpers ----
def _bare_query(v):
v.dialect = netaddr.mac_bare
return str(v)
def _bool_hwaddr_query(v):
if v:
return True
def _int_hwaddr_query(v):
return int(v)
def _cisco_query(v):
v.dialect = netaddr.mac_cisco
return str(v)
def _empty_hwaddr_query(v, value):
if v:
return value
def _linux_query(v):
v.dialect = mac_linux
return str(v)
def _postgresql_query(v):
v.dialect = netaddr.mac_pgsql
return str(v)
def _unix_query(v):
v.dialect = netaddr.mac_unix
return str(v)
def _win_query(v):
v.dialect = netaddr.mac_eui48
return str(v)
# ---- IP address and network filters ----
def ipaddr(value, query='', version=False, alias='ipaddr'):
''' Check if string is an IP address or network and filter it '''
query_func_extra_args = {
'': ('vtype',),
'6to4': ('vtype', 'value'),
'cidr_lookup': ('iplist', 'value'),
'first_usable': ('vtype',),
'int': ('vtype',),
'ipv4': ('value',),
'ipv6': ('value',),
'last_usable': ('vtype',),
'link-local': ('value',),
'loopback': ('value',),
'lo': ('value',),
'multicast': ('value',),
'next_usable': ('vtype',),
'previous_usable': ('vtype',),
'private': ('value',),
'public': ('value',),
'unicast': ('value',),
'range_usable': ('vtype',),
'wrap': ('vtype', 'value'),
}
query_func_map = {
'': _empty_ipaddr_query,
'6to4': _6to4_query,
'address': _ip_query,
'address/prefix': _address_prefix_query, # deprecate
'bool': _bool_ipaddr_query,
'broadcast': _broadcast_query,
'cidr': _cidr_query,
'cidr_lookup': _cidr_lookup_query,
'first_usable': _first_usable_query,
'gateway': _gateway_query, # deprecate
'gw': _gateway_query, # deprecate
'host': _host_query,
'host/prefix': _address_prefix_query, # deprecate
'hostmask': _hostmask_query,
'hostnet': _gateway_query, # deprecate
'int': _int_query,
'ip': _ip_query,
'ip/prefix': _ip_prefix_query,
'ip_netmask': _ip_netmask_query,
# 'ip_wildcard': _ip_wildcard_query, built then could not think of use case
'ipv4': _ipv4_query,
'ipv6': _ipv6_query,
'last_usable': _last_usable_query,
'link-local': _link_local_query,
'lo': _loopback_query,
'loopback': _loopback_query,
'multicast': _multicast_query,
'net': _net_query,
'next_usable': _next_usable_query,
'netmask': _netmask_query,
'network': _network_query,
'network_id': _network_id_query,
'network/prefix': _subnet_query,
'network_netmask': _network_netmask_query,
'network_wildcard': _network_wildcard_query,
'prefix': _prefix_query,
'previous_usable': _previous_usable_query,
'private': _private_query,
'public': _public_query,
'range_usable': _range_usable_query,
'revdns': _revdns_query,
'router': _gateway_query, # deprecate
'size': _size_query,
'size_usable': _size_usable_query,
'subnet': _subnet_query,
'type': _type_query,
'unicast': _unicast_query,
'v4': _ipv4_query,
'v6': _ipv6_query,
'version': _version_query,
'wildcard': _hostmask_query,
'wrap': _wrap_query,
}
vtype = None
if not value:
return False
elif value is True:
return False
# Check if value is a list and parse each element
elif isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, str(query), version):
_ret.append(ipaddr(element, str(query), version))
if _ret:
return _ret
else:
return list()
# Check if value is a number and convert it to an IP address
elif str(value).isdigit():
# We don't know what IP version to assume, so let's check IPv4 first,
# then IPv6
try:
if ((not version) or (version and version == 4)):
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = int(value)
v.prefixlen = 32
elif version and version == 6:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# IPv4 didn't work the first time, so it definitely has to be IPv6
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# The value is too big for IPv6. Are you a nanobot?
except:
return False
# We got an IP address, let's mark it as such
value = str(v)
vtype = 'address'
# value has not been recognized, check if it's a valid IP string
else:
try:
v = netaddr.IPNetwork(value)
# value is a valid IP string, check if user specified
# CIDR prefix or just an IP address, this will indicate default
# output format
try:
address, prefix = value.split('/')
vtype = 'network'
except:
vtype = 'address'
# value hasn't been recognized, maybe it's a numerical CIDR?
except:
try:
                address, prefix = value.split('/')
                address = int(address)
                prefix = int(prefix)
# It's not numerical CIDR, give up
except:
return False
# It is something, so let's try and build a CIDR from the parts
try:
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv4 CIDR
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv6 CIDR. Give up.
except:
return False
# We have a valid CIDR, so let's write it in correct format
value = str(v)
vtype = 'network'
# We have a query string but it's not in the known query types. Check if
# that string is a valid subnet, if so, we can check later if given IP
# address/network is inside that specific subnet
try:
# ?? 6to4 and link-local were True here before. Should they still?
if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
query = 'cidr_lookup'
except:
pass
    # This code checks if value matches the IP version the user wants, i.e. if
    # it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
# If version does not match, return False
if version and v.version != version:
return False
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
try:
float(query)
if v.size == 1:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
elif v.size > 1:
try:
return str(v[query]) + '/' + str(v.prefixlen)
except:
return False
else:
return value
except:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
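# Examples (doctest-style, assuming netaddr's standard string formatting):
#
#   >>> ipaddr('192.168.0.1/24', 'address')
#   '192.168.0.1'
#   >>> ipaddr('192.168.0.0/24', 'broadcast')
#   '192.168.0.255'
#   >>> ipaddr('192.168.0.0/24', 'netmask')
#   '255.255.255.0'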
def ipwrap(value, query=''):
try:
if isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, query, version=False, alias='ipwrap'):
_ret.append(ipaddr(element, 'wrap'))
else:
_ret.append(element)
return _ret
else:
_ret = ipaddr(value, query, version=False, alias='ipwrap')
if _ret:
return ipaddr(_ret, 'wrap')
else:
return value
except:
return value
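# Example: ipwrap('fe80::1') -> '[fe80::1]', while IPv4 values pass through
# unchanged, e.g. ipwrap('192.0.2.1') -> '192.0.2.1'.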
def ipv4(value, query=''):
return ipaddr(value, query, version=4, alias='ipv4')
def ipv6(value, query=''):
return ipaddr(value, query, version=6, alias='ipv6')
# Split given subnet into smaller subnets or find out the biggest subnet of
# a given IP address with given CIDR prefix
# Usage:
#
# - address or address/prefix | ipsubnet
# returns CIDR subnet of a given input
#
# - address/prefix | ipsubnet(cidr)
# returns number of possible subnets for given CIDR prefix
#
# - address/prefix | ipsubnet(cidr, index)
# returns new subnet with given CIDR prefix
#
# - address | ipsubnet(cidr)
# returns biggest subnet with given CIDR prefix that address belongs to
#
# - address | ipsubnet(cidr, index)
# returns next indexed subnet which contains given address
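#
# Examples (assuming netaddr's standard formatting; note the results are
# returned as strings):
#
#   >>> ipsubnet('192.168.0.0/24', 26)
#   '4'
#   >>> ipsubnet('192.168.0.0/24', 26, 1)
#   '192.168.0.64/26'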
def ipsubnet(value, query='', index='x'):
''' Manipulate IPv4/IPv6 subnets '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return str(value)
elif str(query).isdigit():
vsize = ipaddr(v, 'size')
query = int(query)
try:
float(index)
index = int(index)
if vsize > 1:
try:
return str(list(value.subnet(query))[index])
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[index])
except:
return False
except:
if vsize > 1:
try:
return str(len(list(value.subnet(query))))
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[0])
except:
return False
return False
# Returns the nth host within a network described by value.
# Usage:
#
# - address or address/prefix | nthhost(nth)
# returns the nth host within the given network
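#
# Example: nthhost('192.168.0.0/24', 5) yields the netaddr IPAddress
# 192.168.0.5, which renders as '192.168.0.5' in a template.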
def nthhost(value, query=''):
''' Get the nth host within a given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
nth = int(query)
if value.size > nth:
return value[nth]
except ValueError:
return False
return False
# Returns the next nth usable ip within a network described by value.
def next_nth_usable(value, offset):
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
v = netaddr.IPNetwork(v)
except:
return False
    if not isinstance(offset, int):
raise errors.AnsibleFilterError('Must pass in an integer')
if v.size > 1:
first_usable, last_usable = _first_last(v)
nth_ip = int(netaddr.IPAddress(int(v.ip) + offset))
if nth_ip >= first_usable and nth_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) + offset))
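# Example: next_nth_usable('192.168.122.1/24', 2) -> '192.168.122.3', which
# stays within the usable range .1-.254 of the /24 network.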
# Returns the previous nth usable ip within a network described by value.
def previous_nth_usable(value, offset):
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
v = netaddr.IPNetwork(v)
except:
return False
    if not isinstance(offset, int):
raise errors.AnsibleFilterError('Must pass in an integer')
if v.size > 1:
first_usable, last_usable = _first_last(v)
nth_ip = int(netaddr.IPAddress(int(v.ip) - offset))
if nth_ip >= first_usable and nth_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) - offset))
def _range_checker(ip_check, first, last):
'''
Tests whether an ip address is within the bounds of the first and last address.
:param ip_check: The ip to test if it is within first and last.
:param first: The first IP in the range to test against.
:param last: The last IP in the range to test against.
:return: bool
'''
if ip_check >= first and ip_check <= last:
return True
else:
return False
def _address_normalizer(value):
'''
Used to validate an address or network type and return it in a consistent format.
This is being used for future use cases not currently available such as an address range.
:param value: The string representation of an address or network.
:return: The address or network in the normalized form.
'''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address' or vtype == "network":
v = ipaddr(value, 'subnet')
except:
return False
return v
def network_in_usable(value, test):
'''
    Checks whether 'test' is a usable address or addresses in 'value'
    :param value: The string representation of an address or network to test against.
:param test: The string representation of an address or network to validate if it is within the range of 'value'.
:return: bool
'''
# normalize value and test variables into an ipaddr
v = _address_normalizer(value)
w = _address_normalizer(test)
    # get first and last addresses as integers to compare value and test; also catches the value when the case is /32
v_first = ipaddr(ipaddr(v, 'first_usable') or ipaddr(v, 'address'), 'int')
v_last = ipaddr(ipaddr(v, 'last_usable') or ipaddr(v, 'address'), 'int')
w_first = ipaddr(ipaddr(w, 'network') or ipaddr(w, 'address'), 'int')
w_last = ipaddr(ipaddr(w, 'broadcast') or ipaddr(w, 'address'), 'int')
if _range_checker(w_first, v_first, v_last) and _range_checker(w_last, v_first, v_last):
return True
else:
return False
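# Example: network_in_usable('192.168.0.0/24', '192.168.0.1') is True, while
# network_in_usable('192.168.0.0/24', '192.168.0.255') is False because the
# broadcast address is outside the usable range.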
def network_in_network(value, test):
'''
Checks whether the 'test' address or addresses are in 'value', including broadcast and network
    :param value: The network address or range to test against.
:param test: The address or network to validate if it is within the range of 'value'.
:return: bool
'''
# normalize value and test variables into an ipaddr
v = _address_normalizer(value)
w = _address_normalizer(test)
    # get first and last addresses as integers to compare value and test; also catches the value when the case is /32
v_first = ipaddr(ipaddr(v, 'network') or ipaddr(v, 'address'), 'int')
v_last = ipaddr(ipaddr(v, 'broadcast') or ipaddr(v, 'address'), 'int')
w_first = ipaddr(ipaddr(w, 'network') or ipaddr(w, 'address'), 'int')
w_last = ipaddr(ipaddr(w, 'broadcast') or ipaddr(w, 'address'), 'int')
if _range_checker(w_first, v_first, v_last) and _range_checker(w_last, v_first, v_last):
return True
else:
return False
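# Example: network_in_network('192.168.0.0/24', '192.168.0.255') is True,
# since this check includes the network and broadcast addresses.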
def reduce_on_network(value, network):
'''
Reduces a list of addresses to only the addresses that match a given network.
    :param value: The list of addresses to filter on.
    :param network: The network to validate against.
:return: The reduced list of addresses.
'''
# normalize network variable into an ipaddr
n = _address_normalizer(network)
    # get first and last addresses as integers to compare value and test; also catches the value when the case is /32
n_first = ipaddr(ipaddr(n, 'network') or ipaddr(n, 'address'), 'int')
n_last = ipaddr(ipaddr(n, 'broadcast') or ipaddr(n, 'address'), 'int')
# create an empty list to fill and return
r = []
for address in value:
# normalize address variables into an ipaddr
a = _address_normalizer(address)
        # get first and last addresses as integers to compare value and test; also catches the value when the case is /32
a_first = ipaddr(ipaddr(a, 'network') or ipaddr(a, 'address'), 'int')
a_last = ipaddr(ipaddr(a, 'broadcast') or ipaddr(a, 'address'), 'int')
if _range_checker(a_first, n_first, n_last) and _range_checker(a_last, n_first, n_last):
r.append(address)
return r
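# Example: reduce_on_network(['192.168.0.34', '10.3.0.3', '192.168.2.34'],
# '192.168.0.0/24') -> ['192.168.0.34'].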
# Returns the SLAAC address within a network for a given HW/MAC address.
# Usage:
#
# - prefix | slaac(mac)
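#
# Example: 'fdcf:1894:23b5:d38c::/64' | slaac('c2:59:9c:4d:31:1c') yields
# fdcf:1894:23b5:d38c:c059:9cff:fe4d:311c (the EUI-64 interface identifier
# derived from the MAC address).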
def slaac(value, query=''):
''' Get the SLAAC address within given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
if ipaddr(value, 'version') != 6:
return False
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
mac = hwaddr(query, alias='slaac')
eui = netaddr.EUI(mac)
except:
return False
return eui.ipv6(value.network)
# ---- HWaddr / MAC address filters ----
def hwaddr(value, query='', alias='hwaddr'):
''' Check if string is a HW/MAC address and filter it '''
query_func_extra_args = {
'': ('value',),
}
query_func_map = {
'': _empty_hwaddr_query,
'bare': _bare_query,
'bool': _bool_hwaddr_query,
'int': _int_hwaddr_query,
'cisco': _cisco_query,
'eui48': _win_query,
'linux': _linux_query,
'pgsql': _postgresql_query,
'postgresql': _postgresql_query,
'psql': _postgresql_query,
'unix': _unix_query,
'win': _win_query,
}
    try:
        v = netaddr.EUI(value)
    except:
        if query and query != 'bool':
            raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
        # An invalid value with no query (or a 'bool' query) is simply falsy.
        return False
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
def macaddr(value, query=''):
return hwaddr(value, query, alias='macaddr')
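# Example: hwaddr('08:4f:a9:19:ee:2c', 'cisco') -> '084f.a919.ee2c' and
# hwaddr('08:4f:a9:19:ee:2c', 'bare') -> '084FA919EE2C'.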
def _need_netaddr(f_name, *args, **kwargs):
raise errors.AnsibleFilterError('The %s filter requires python-netaddr be '
'installed on the ansible controller' % f_name)
def ip4_hex(arg, delimiter=''):
''' Convert an IPv4 address to Hexadecimal notation '''
numbers = list(map(int, arg.split('.')))
return '{0:02x}{sep}{1:02x}{sep}{2:02x}{sep}{3:02x}'.format(*numbers, sep=delimiter)
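# Example: ip4_hex('192.168.1.5') -> 'c0a80105'; with a delimiter,
# ip4_hex('192.168.1.5', ':') -> 'c0:a8:01:05'.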
# ---- Ansible filters ----
class FilterModule(object):
''' IP address and network manipulation filters '''
filter_map = {
# IP addresses and networks
'ipaddr': ipaddr,
'ipwrap': ipwrap,
'ip4_hex': ip4_hex,
'ipv4': ipv4,
'ipv6': ipv6,
'ipsubnet': ipsubnet,
'next_nth_usable': next_nth_usable,
'network_in_network': network_in_network,
'network_in_usable': network_in_usable,
'reduce_on_network': reduce_on_network,
'nthhost': nthhost,
'previous_nth_usable': previous_nth_usable,
'slaac': slaac,
# MAC / HW addresses
'hwaddr': hwaddr,
'macaddr': macaddr
}
def filters(self):
if netaddr:
return self.filter_map
else:
# Need to install python-netaddr for these filters to work
return dict((f, partial(_need_netaddr, f)) for f in self.filter_map)
| gpl-3.0 |
varunnaganathan/django | tests/admin_widgets/tests.py | 2 | 61214 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gettext
import os
from datetime import datetime, timedelta
from importlib import import_module
from unittest import skipIf
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.auth.models import User
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import CharField, DateField
from django.test import SimpleTestCase, TestCase, override_settings
from django.urls import reverse
from django.utils import six, translation
from . import models
from .widgetadmin import site as widget_admin_site
try:
import pytz
except ImportError:
pytz = None
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime(2007, 5, 30, 13, 20, 10)
)
cls.u2 = User.objects.create(
pk=101, username='testser', first_name='Add', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=False,
is_staff=True, last_login=datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime(2007, 5, 30, 13, 20, 10)
)
models.Car.objects.create(id=1, owner=cls.u1, make='Volkswagen', model='Passat')
models.Car.objects.create(id=2, owner=cls.u2, make='BMW', model='M3')
class SeleniumDataMixin(object):
def setUp(self):
self.u1 = User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime(2007, 5, 30, 13, 20, 10)
)
class AdminFormfieldForDBFieldTests(SimpleTestCase):
"""
Tests for correct behavior of ModelAdmin.formfield_for_dbfield
"""
def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides):
"""
Helper to call formfield_for_dbfield for a given model and field name
and verify that the returned formfield is appropriate.
"""
# Override any settings on the model admin
class MyModelAdmin(admin.ModelAdmin):
pass
for k in admin_overrides:
setattr(MyModelAdmin, k, admin_overrides[k])
# Construct the admin, and ask it for a formfield
ma = MyModelAdmin(model, admin.site)
ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None)
# "unwrap" the widget wrapper, if needed
if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper):
widget = ff.widget.widget
else:
widget = ff.widget
# Check that we got a field of the right type
self.assertTrue(
isinstance(widget, widgetclass),
"Wrong widget for %s.%s: expected %s, got %s" % (
model.__class__.__name__,
fieldname,
widgetclass,
type(widget),
)
)
# Return the formfield so that other tests can continue
return ff
def test_DateField(self):
self.assertFormfield(models.Event, 'start_date', widgets.AdminDateWidget)
def test_DateTimeField(self):
self.assertFormfield(models.Member, 'birthdate', widgets.AdminSplitDateTime)
def test_TimeField(self):
self.assertFormfield(models.Event, 'start_time', widgets.AdminTimeWidget)
def test_TextField(self):
self.assertFormfield(models.Event, 'description', widgets.AdminTextareaWidget)
def test_URLField(self):
self.assertFormfield(models.Event, 'link', widgets.AdminURLFieldWidget)
def test_IntegerField(self):
self.assertFormfield(models.Event, 'min_age', widgets.AdminIntegerFieldWidget)
def test_CharField(self):
self.assertFormfield(models.Member, 'name', widgets.AdminTextInputWidget)
def test_EmailField(self):
self.assertFormfield(models.Member, 'email', widgets.AdminEmailInputWidget)
def test_FileField(self):
self.assertFormfield(models.Album, 'cover_art', widgets.AdminFileWidget)
def test_ForeignKey(self):
self.assertFormfield(models.Event, 'main_band', forms.Select)
def test_raw_id_ForeignKey(self):
self.assertFormfield(models.Event, 'main_band', widgets.ForeignKeyRawIdWidget,
raw_id_fields=['main_band'])
def test_radio_fields_ForeignKey(self):
ff = self.assertFormfield(models.Event, 'main_band', widgets.AdminRadioSelect,
radio_fields={'main_band': admin.VERTICAL})
self.assertEqual(ff.empty_label, None)
def test_many_to_many(self):
self.assertFormfield(models.Band, 'members', forms.SelectMultiple)
def test_raw_id_many_to_many(self):
self.assertFormfield(models.Band, 'members', widgets.ManyToManyRawIdWidget,
raw_id_fields=['members'])
def test_filtered_many_to_many(self):
self.assertFormfield(models.Band, 'members', widgets.FilteredSelectMultiple,
filter_vertical=['members'])
def test_formfield_overrides(self):
self.assertFormfield(models.Event, 'start_date', forms.TextInput,
formfield_overrides={DateField: {'widget': forms.TextInput}})
def test_formfield_overrides_widget_instances(self):
"""
Test that widget instances in formfield_overrides are not shared between
different fields. (#19423)
"""
class BandAdmin(admin.ModelAdmin):
formfield_overrides = {
CharField: {'widget': forms.TextInput(attrs={'size': '10'})}
}
ma = BandAdmin(models.Band, admin.site)
f1 = ma.formfield_for_dbfield(models.Band._meta.get_field('name'), request=None)
f2 = ma.formfield_for_dbfield(models.Band._meta.get_field('style'), request=None)
self.assertNotEqual(f1.widget, f2.widget)
self.assertEqual(f1.widget.attrs['maxlength'], '100')
self.assertEqual(f2.widget.attrs['maxlength'], '20')
self.assertEqual(f2.widget.attrs['size'], '10')
def test_field_with_choices(self):
self.assertFormfield(models.Member, 'gender', forms.Select)
def test_choices_with_radio_fields(self):
self.assertFormfield(models.Member, 'gender', widgets.AdminRadioSelect,
radio_fields={'gender': admin.VERTICAL})
def test_inheritance(self):
self.assertFormfield(models.Album, 'backside_art', widgets.AdminFileWidget)
def test_m2m_widgets(self):
"""m2m fields help text as it applies to admin app (#9321)."""
class AdvisorAdmin(admin.ModelAdmin):
filter_vertical = ['companies']
self.assertFormfield(models.Advisor, 'companies', widgets.FilteredSelectMultiple,
filter_vertical=['companies'])
ma = AdvisorAdmin(models.Advisor, admin.site)
f = ma.formfield_for_dbfield(models.Advisor._meta.get_field('companies'), request=None)
self.assertEqual(
six.text_type(f.help_text),
'Hold down "Control", or "Command" on a Mac, to select more than one.'
)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class AdminFormfieldForDBFieldWithRequestTests(TestDataMixin, TestCase):
def test_filter_choices_by_request_user(self):
"""
Ensure the user can only see their own cars in the foreign key dropdown.
"""
self.client.login(username="super", password="secret")
response = self.client.get(reverse('admin:admin_widgets_cartire_add'))
self.assertNotContains(response, "BMW M3")
self.assertContains(response, "Volkswagen Passat")
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyWidgetChangeList(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username="super", password="secret")
def test_changelist_ForeignKey(self):
response = self.client.get(reverse('admin:admin_widgets_car_changelist'))
self.assertContains(response, '/auth/user/add/')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyRawIdWidget(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username="super", password="secret")
def test_nonexistent_target_id(self):
band = models.Band.objects.create(name='Bogey Blues')
pk = band.pk
band.delete()
post_data = {
"main_band": '%s' % pk,
}
# Try posting with a non-existent pk in a raw id field: this
# should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'), post_data)
self.assertContains(response,
'Select a valid choice. That choice is not one of the available choices.')
def test_invalid_target_id(self):
for test_str in ('Iñtërnâtiônàlizætiøn', "1234'", -1234):
# This should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'),
{"main_band": test_str})
self.assertContains(response,
'Select a valid choice. That choice is not one of the available choices.')
def test_url_params_from_lookup_dict_any_iterable(self):
lookup1 = widgets.url_params_from_lookup_dict({'color__in': ('red', 'blue')})
lookup2 = widgets.url_params_from_lookup_dict({'color__in': ['red', 'blue']})
self.assertEqual(lookup1, {'color__in': 'red,blue'})
self.assertEqual(lookup1, lookup2)
def test_url_params_from_lookup_dict_callable(self):
def my_callable():
return 'works'
lookup1 = widgets.url_params_from_lookup_dict({'myfield': my_callable})
lookup2 = widgets.url_params_from_lookup_dict({'myfield': my_callable()})
self.assertEqual(lookup1, lookup2)
class FilteredSelectMultipleWidgetTest(SimpleTestCase):
def test_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple('test\\', False)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple="multiple" name="test" class="selectfilter" '
'data-field-name="test\\" data-is-stacked="0">\n</select>'
)
def test_stacked_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple('test\\', True)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple="multiple" name="test" class="selectfilterstacked" '
'data-field-name="test\\" data-is-stacked="1">\n</select>'
)
class AdminDateWidgetTest(SimpleTestCase):
def test_attrs(self):
"""
Ensure that user-supplied attrs are used.
Refs #12073.
"""
w = widgets.AdminDateWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="vDateField" name="test" size="10" />',
)
# pass attrs to widget
w = widgets.AdminDateWidget(attrs={'size': 20, 'class': 'myDateField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="myDateField" name="test" size="20" />',
)
class AdminTimeWidgetTest(SimpleTestCase):
def test_attrs(self):
"""
Ensure that user-supplied attrs are used.
Refs #12073.
"""
w = widgets.AdminTimeWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="vTimeField" name="test" size="8" />',
)
# pass attrs to widget
w = widgets.AdminTimeWidget(attrs={'size': 20, 'class': 'myTimeField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="myTimeField" name="test" size="20" />',
)
class AdminSplitDateTimeWidgetTest(SimpleTestCase):
def test_render(self):
w = widgets.AdminSplitDateTime()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Date: <input value="2007-12-01" type="text" class="vDateField" '
'name="test_0" size="10" /><br />'
'Time: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8" /></p>'
)
def test_localization(self):
w = widgets.AdminSplitDateTime()
with self.settings(USE_L10N=True), translation.override('de-at'):
w.is_localized = True
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Datum: <input value="01.12.2007" type="text" '
'class="vDateField" name="test_0"size="10" /><br />'
'Zeit: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8" /></p>'
)
class AdminURLWidgetTest(SimpleTestCase):
def test_render(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', ''),
'<input class="vURLField" name="test" type="url" />'
)
self.assertHTMLEqual(
w.render('test', 'http://example.com'),
'<p class="url">Currently:<a href="http://example.com">'
'http://example.com</a><br />'
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example.com" /></p>'
)
def test_render_idn(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', 'http://example-äüö.com'),
'<p class="url">Currently: <a href="http://xn--example--7za4pnc.com">'
'http://example-äüö.com</a><br />'
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example-äüö.com" /></p>'
)
def test_render_quoting(self):
# WARNING: Don't use assertHTMLEqual in that testcase!
# assertHTMLEqual will get rid of some escapes which are tested here!
w = widgets.AdminURLFieldWidget()
self.assertEqual(
w.render('test', 'http://example.com/<sometag>some text</sometag>'),
'<p class="url">Currently: '
'<a href="http://example.com/%3Csometag%3Esome%20text%3C/sometag%3E">'
'http://example.com/<sometag>some text</sometag></a><br />'
'Change: <input class="vURLField" name="test" type="url" '
'value="http://example.com/<sometag>some text</sometag>" /></p>'
)
self.assertEqual(
w.render('test', 'http://example-äüö.com/<sometag>some text</sometag>'),
'<p class="url">Currently: '
'<a href="http://xn--example--7za4pnc.com/%3Csometag%3Esome%20text%3C/sometag%3E">'
'http://example-äüö.com/<sometag>some text</sometag></a><br />'
'Change: <input class="vURLField" name="test" type="url" '
'value="http://example-äüö.com/<sometag>some text</sometag>" /></p>'
)
self.assertEqual(
w.render('test', 'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"'),
'<p class="url">Currently: '
'<a href="http://www.example.com/%C3%A4%22%3E%3Cscript%3Ealert(%22XSS!%22)%3C/script%3E%22">'
'http://www.example.com/%C3%A4"><script>'
'alert("XSS!")</script>"</a><br />'
'Change: <input class="vURLField" name="test" type="url" '
'value="http://www.example.com/%C3%A4"><script>'
'alert("XSS!")</script>"" /></p>'
)
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls',
)
class AdminFileWidgetTests(TestDataMixin, TestCase):
@classmethod
def setUpTestData(cls):
super(AdminFileWidgetTests, cls).setUpTestData()
band = models.Band.objects.create(name='Linkin Park')
cls.album = band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
def test_render(self):
w = widgets.AdminFileWidget()
self.assertHTMLEqual(
w.render('test', self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id" /> '
'<label for="test-clear_id">Clear</label></span><br />'
'Change: <input type="file" name="test" /></p>' % {
'STORAGE_URL': default_storage.url(''),
},
)
self.assertHTMLEqual(
w.render('test', SimpleUploadedFile('test', b'content')),
'<input type="file" name="test" />',
)
def test_readonly_fields(self):
"""
File widgets should render as a link when they're marked "read only."
"""
self.client.login(username="super", password="secret")
response = self.client.get(reverse('admin:admin_widgets_album_change', args=(self.album.id,)))
self.assertContains(
response,
'<p><a href="%(STORAGE_URL)salbums/hybrid_theory.jpg">'
'albums\hybrid_theory.jpg</a></p>' % {'STORAGE_URL': default_storage.url('')},
html=True,
)
self.assertNotContains(
response,
'<input type="file" name="cover_art" id="id_cover_art" />',
html=True,
)
response = self.client.get(reverse('admin:admin_widgets_album_add'))
self.assertContains(
response,
'<p></p>',
html=True,
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ForeignKeyRawIdWidgetTest(TestCase):
def test_render(self):
band = models.Band.objects.create(name='Linkin Park')
band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
rel = models.Album._meta.get_field('band').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', band.pk, attrs={}),
'<input type="text" name="test" value="%(bandpk)s" '
'class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/band/?_to_field=id" class="related-lookup" '
'id="lookup_id_test" title="Lookup"></a> <strong>'
'<a href="/admin_widgets/band/%(bandpk)s/change/">Linkin Park</a>'
'</strong>' % {'bandpk': band.pk}
)
def test_relations_to_non_primary_key(self):
# Check that ForeignKeyRawIdWidget works with fields which aren't
# related to the model's primary key.
apple = models.Inventory.objects.create(barcode=86, name='Apple')
models.Inventory.objects.create(barcode=22, name='Pear')
core = models.Inventory.objects.create(
barcode=87, name='Core', parent=apple
)
rel = models.Inventory._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', core.parent_id, attrs={}),
'<input type="text" name="test" value="86" '
'class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
'Apple</a></strong>' % {'pk': apple.pk}
)
def test_fk_related_model_not_in_admin(self):
# FK to a model not registered with admin site. Raw ID widget should
# have no magnifying glass link. See #16542
big_honeycomb = models.Honeycomb.objects.create(location='Old tree')
big_honeycomb.bee_set.create()
rel = models.Bee._meta.get_field('honeycomb').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('honeycomb_widget', big_honeycomb.pk, attrs={}),
'<input type="text" name="honeycomb_widget" value="%(hcombpk)s" />'
' <strong>Honeycomb object</strong>'
% {'hcombpk': big_honeycomb.pk}
)
def test_fk_to_self_model_not_in_admin(self):
# FK to self, not registered with admin site. Raw ID widget should have
# no magnifying glass link. See #16542
subject1 = models.Individual.objects.create(name='Subject #1')
models.Individual.objects.create(name='Child', parent=subject1)
rel = models.Individual._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('individual_widget', subject1.pk, attrs={}),
'<input type="text" name="individual_widget" value="%(subj1pk)s" />'
' <strong>Individual object</strong>'
% {'subj1pk': subject1.pk}
)
def test_proper_manager_for_label_lookup(self):
# see #9258
rel = models.Inventory._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
hidden = models.Inventory.objects.create(
barcode=93, name='Hidden', hidden=True
)
child_of_hidden = models.Inventory.objects.create(
barcode=94, name='Child of hidden', parent=hidden
)
self.assertHTMLEqual(
w.render('test', child_of_hidden.parent_id, attrs={}),
'<input type="text" name="test" value="93" class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
'Hidden</a></strong>' % {'pk': hidden.pk}
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ManyToManyRawIdWidgetTest(TestCase):
def test_render(self):
band = models.Band.objects.create(name='Linkin Park')
m1 = models.Member.objects.create(name='Chester')
m2 = models.Member.objects.create(name='Mike')
band.members.add(m1, m2)
rel = models.Band._meta.get_field('members').remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', [m1.pk, m2.pk], attrs={}), (
'<input type="text" name="test" value="%(m1pk)s,%(m2pk)s" class="vManyToManyRawIdAdminField" />'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % dict(m1pk=m1.pk, m2pk=m2.pk)
)
self.assertHTMLEqual(
w.render('test', [m1.pk]), (
'<input type="text" name="test" value="%(m1pk)s" class="vManyToManyRawIdAdminField">'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % dict(m1pk=m1.pk)
)
def test_m2m_related_model_not_in_admin(self):
# M2M relationship with model not registered with admin site. Raw ID
# widget should have no magnifying glass link. See #16542
consultor1 = models.Advisor.objects.create(name='Rockstar Techie')
c1 = models.Company.objects.create(name='Doodle')
c2 = models.Company.objects.create(name='Pear')
consultor1.companies.add(c1, c2)
rel = models.Advisor._meta.get_field('companies').remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('company_widget1', [c1.pk, c2.pk], attrs={}),
'<input type="text" name="company_widget1" value="%(c1pk)s,%(c2pk)s" />' % {'c1pk': c1.pk, 'c2pk': c2.pk}
)
self.assertHTMLEqual(
w.render('company_widget2', [c1.pk]),
'<input type="text" name="company_widget2" value="%(c1pk)s" />' % {'c1pk': c1.pk}
)
class RelatedFieldWidgetWrapperTests(SimpleTestCase):
def test_no_can_add_related(self):
rel = models.Individual._meta.get_field('parent').remote_field
w = widgets.AdminRadioSelect()
# Used to fail with a name error.
w = widgets.RelatedFieldWidgetWrapper(w, rel, widget_admin_site)
self.assertFalse(w.can_add_related)
def test_select_multiple_widget_cant_change_delete_related(self):
rel = models.Individual._meta.get_field('parent').remote_field
widget = forms.SelectMultiple()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertFalse(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_on_delete_cascade_rel_cant_delete_related(self):
rel = models.Individual._meta.get_field('soulmate').remote_field
widget = forms.Select()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertTrue(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class DateTimePickerSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_show_hide_date_time_picker_widgets(self):
"""
Ensure that pressing the ESC key closes the date and time picker
widgets.
Refs #17064.
"""
from selenium.webdriver.common.keys import Keys
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
# First, with the date picker widget ---------------------------------
# Check that the date picker is hidden
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'none')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# Check that the date picker is visible
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'block')
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# Check that the date picker is hidden again
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'none')
# Then, with the time picker widget ----------------------------------
# Check that the time picker is hidden
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'none')
# Click the time icon
self.selenium.find_element_by_id('clocklink0').click()
# Check that the time picker is visible
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'block')
self.assertEqual(
[
x.text for x in
self.selenium.find_elements_by_xpath("//ul[@class='timelist']/li/a")
],
['Now', 'Midnight', '6 a.m.', 'Noon', '6 p.m.']
)
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# Check that the time picker is hidden again
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'none')
def test_calendar_nonday_class(self):
"""
Ensure cells that are not days of the month have the `nonday` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# make sure the first and last 6 cells have class nonday
for td in tds[:6] + tds[-6:]:
self.assertEqual(td.get_attribute('class'), 'nonday')
def test_calendar_selected_class(self):
"""
        Ensure the cell for the day given in the input has the `selected` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify the selected cell
selected = tds[6]
self.assertEqual(selected.get_attribute('class'), 'selected')
self.assertEqual(selected.text, '1')
def test_calendar_no_selected_class(self):
"""
Ensure no cells are given the selected class when the field is empty.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify there are no cells with the selected class
selected = [td for td in tds if td.get_attribute('class') == 'selected']
self.assertEqual(len(selected), 0)
def test_calendar_show_date_from_input(self):
"""
        Ensure that the calendar shows the date from the input field for every
        locale supported by Django.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Enter test data
member = models.Member.objects.create(name='Bob', birthdate=datetime(1984, 5, 15), gender='M')
# Get month name translations for every locale
month_string = 'May'
path = os.path.join(os.path.dirname(import_module('django.contrib.admin').__file__), 'locale')
for language_code, language_name in settings.LANGUAGES:
try:
catalog = gettext.translation('djangojs', path, [language_code])
except IOError:
continue
if month_string in catalog._catalog:
month_name = catalog._catalog[month_string]
else:
month_name = month_string
# Get the expected caption
may_translation = month_name
expected_caption = '{0:s} {1:d}'.format(may_translation.upper(), 1984)
# Test with every locale
with override_settings(LANGUAGE_CODE=language_code, USE_L10N=True):
# Open a page that has a date picker widget
self.selenium.get('{}{}'.format(self.live_server_url,
reverse('admin:admin_widgets_member_change', args=(member.pk,))))
# Click on the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# Make sure that the right month and year are displayed
self.wait_for_text('#calendarin0 caption', expected_caption)
class DateTimePickerSeleniumChromeTests(DateTimePickerSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerSeleniumIETests(DateTimePickerSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@skipIf(pytz is None, "this test requires pytz")
@override_settings(TIME_ZONE='Asia/Singapore')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class DateTimePickerShortcutsSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_date_time_picker_shortcuts(self):
"""
Ensure that date/time/datetime picker shortcuts work in the current time zone.
Refs #20663.
        This test case is fairly tricky: it relies on selenium still running the browser
in the default time zone "America/Chicago" despite `override_settings` changing
the time zone to "Asia/Singapore".
"""
self.admin_login(username='super', password='secret', login_url='/')
error_margin = timedelta(seconds=10)
        # If we are near a DST transition, add an hour of error margin.
tz = pytz.timezone('America/Chicago')
utc_now = datetime.now(pytz.utc)
tz_yesterday = (utc_now - timedelta(days=1)).astimezone(tz).tzname()
tz_tomorrow = (utc_now + timedelta(days=1)).astimezone(tz).tzname()
if tz_yesterday != tz_tomorrow:
error_margin += timedelta(hours=1)
now = datetime.now()
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_widgets_member_add')))
self.selenium.find_element_by_id('id_name').send_keys('test')
# Click on the "today" and "now" shortcuts.
shortcuts = self.selenium.find_elements_by_css_selector(
'.field-birthdate .datetimeshortcuts')
for shortcut in shortcuts:
shortcut.find_element_by_tag_name('a').click()
# Check that there is a time zone mismatch warning.
# Warning: This would effectively fail if the TIME_ZONE defined in the
# settings has the same UTC offset as "Asia/Singapore" because the
# mismatch warning would be rightfully missing from the page.
self.selenium.find_elements_by_css_selector(
'.field-birthdate .timezonewarning')
# Submit the form.
self.selenium.find_element_by_tag_name('form').submit()
self.wait_page_loaded()
# Make sure that "now" in javascript is within 10 seconds
# from "now" on the server side.
member = models.Member.objects.get(name='test')
self.assertGreater(member.birthdate, now - error_margin)
self.assertLess(member.birthdate, now + error_margin)
class DateTimePickerShortcutsSeleniumChromeTests(DateTimePickerShortcutsSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerShortcutsSeleniumIETests(DateTimePickerShortcutsSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
# The above tests run with Asia/Singapore which are on the positive side of
# UTC. Here we test with a timezone on the negative side.
@override_settings(TIME_ZONE='US/Eastern')
class DateTimePickerAltTimezoneSeleniumFirefoxTests(DateTimePickerShortcutsSeleniumFirefoxTests):
pass
class DateTimePickerAltTimezoneSeleniumChromeTests(DateTimePickerAltTimezoneSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerAltTimezoneSeleniumIETests(DateTimePickerAltTimezoneSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class HorizontalVerticalFilterSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
super(HorizontalVerticalFilterSeleniumFirefoxTests, self).setUp()
self.lisa = models.Student.objects.create(name='Lisa')
self.john = models.Student.objects.create(name='John')
self.bob = models.Student.objects.create(name='Bob')
self.peter = models.Student.objects.create(name='Peter')
self.jenny = models.Student.objects.create(name='Jenny')
self.jason = models.Student.objects.create(name='Jason')
self.cliff = models.Student.objects.create(name='Cliff')
self.arthur = models.Student.objects.create(name='Arthur')
self.school = models.School.objects.create(name='School of Awesome')
def assertActiveButtons(self, mode, field_name, choose, remove,
choose_all=None, remove_all=None):
choose_link = '#id_%s_add_link' % field_name
choose_all_link = '#id_%s_add_all_link' % field_name
remove_link = '#id_%s_remove_link' % field_name
remove_all_link = '#id_%s_remove_all_link' % field_name
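        # The 'choose all'/'remove all' buttons only exist for the horizontal
        # widget, hence the extra assertions in horizontal mode below.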
self.assertEqual(self.has_css_class(choose_link, 'active'), choose)
self.assertEqual(self.has_css_class(remove_link, 'active'), remove)
if mode == 'horizontal':
self.assertEqual(self.has_css_class(choose_all_link, 'active'), choose_all)
self.assertEqual(self.has_css_class(remove_all_link, 'active'), remove_all)
def execute_basic_operations(self, mode, field_name):
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
choose_all_link = 'id_%s_add_all_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
remove_all_link = 'id_%s_remove_all_link' % field_name
# Initial positions ---------------------------------------------------
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id)])
self.assertActiveButtons(mode, field_name, False, False, True, True)
# Click 'Choose all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(choose_all_link).click()
elif mode == 'vertical':
            # There's no 'Choose all' button in vertical mode, so individually
# select all options and click 'Choose'.
for option in self.selenium.find_elements_by_css_selector(from_box + ' > option'):
option.click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertActiveButtons(mode, field_name, False, False, False, True)
# Click 'Remove all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(remove_all_link).click()
elif mode == 'vertical':
            # There's no 'Remove all' button in vertical mode, so individually
# select all options and click 'Remove'.
for option in self.selenium.find_elements_by_css_selector(to_box + ' > option'):
option.click()
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box,
[str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertSelectOptions(to_box, [])
self.assertActiveButtons(mode, field_name, False, False, True, False)
# Choose some options ------------------------------------------------
from_lisa_select_option = self.get_select_option(from_box, str(self.lisa.id))
        # Check the title attribute is there for tooltips: ticket #20821
self.assertEqual(from_lisa_select_option.get_attribute('title'), from_lisa_select_option.get_attribute('text'))
from_lisa_select_option.click()
self.get_select_option(from_box, str(self.jason.id)).click()
self.get_select_option(from_box, str(self.bob.id)).click()
self.get_select_option(from_box, str(self.john.id)).click()
self.assertActiveButtons(mode, field_name, True, False, True, False)
self.selenium.find_element_by_id(choose_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.bob.id),
str(self.jason.id), str(self.john.id)])
# Check the tooltip is still there after moving: ticket #20821
to_lisa_select_option = self.get_select_option(to_box, str(self.lisa.id))
self.assertEqual(to_lisa_select_option.get_attribute('title'), to_lisa_select_option.get_attribute('text'))
# Remove some options -------------------------------------------------
self.get_select_option(to_box, str(self.lisa.id)).click()
self.get_select_option(to_box, str(self.bob.id)).click()
self.assertActiveButtons(mode, field_name, False, True, True, True)
self.selenium.find_element_by_id(remove_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)])
self.assertSelectOptions(to_box,
[str(self.jason.id), str(self.john.id)])
# Choose some more options --------------------------------------------
self.get_select_option(from_box, str(self.arthur.id)).click()
self.get_select_option(from_box, str(self.cliff.id)).click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)])
self.assertSelectOptions(to_box,
[str(self.jason.id), str(self.john.id),
str(self.arthur.id), str(self.cliff.id)])
def test_basic(self):
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get('%s%s' % (
self.live_server_url, reverse('admin:admin_widgets_school_change', args=(self.school.id,))))
self.wait_page_loaded()
self.execute_basic_operations('vertical', 'students')
self.execute_basic_operations('horizontal', 'alumni')
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = models.School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()),
[self.arthur, self.cliff, self.jason, self.john])
self.assertEqual(list(self.school.alumni.all()),
[self.arthur, self.cliff, self.jason, self.john])
def test_filter(self):
"""
Ensure that typing in the search box filters out options displayed in
the 'from' box.
"""
from selenium.webdriver.common.keys import Keys
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, reverse('admin:admin_widgets_school_change', args=(self.school.id,))))
for field_name in ['students', 'alumni']:
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
input = self.selenium.find_element_by_id('id_%s_input' % field_name)
# Initial values
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
# Typing in some characters filters out non-matching options
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys('R')
self.assertSelectOptions(from_box, [str(self.arthur.id)])
# Clearing the text box makes the other options reappear
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
# -----------------------------------------------------------------
# Check that choosing a filtered option sends it properly to the
# 'to' box.
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
self.get_select_option(from_box, str(self.jason.id)).click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id),
str(self.jason.id)])
self.get_select_option(to_box, str(self.lisa.id)).click()
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.lisa.id)])
self.assertSelectOptions(to_box,
[str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE]) # Clear text box
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jenny.id),
str(self.john.id), str(self.lisa.id)])
self.assertSelectOptions(to_box,
[str(self.peter.id), str(self.jason.id)])
# -----------------------------------------------------------------
# Check that pressing enter on a filtered option sends it properly
# to the 'to' box.
self.get_select_option(to_box, str(self.jason.id)).click()
self.selenium.find_element_by_id(remove_link).click()
input.send_keys('ja')
self.assertSelectOptions(from_box, [str(self.jason.id)])
input.send_keys([Keys.ENTER])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE, Keys.BACK_SPACE])
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = models.School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()),
[self.jason, self.peter])
self.assertEqual(list(self.school.alumni.all()),
[self.jason, self.peter])
def test_back_button_bug(self):
"""
Some browsers had a bug where navigating away from the change page
and then clicking the browser's back button would clear the
filter_horizontal/filter_vertical widgets (#13614).
"""
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
change_url = reverse('admin:admin_widgets_school_change', args=(self.school.id,))
self.selenium.get(self.live_server_url + change_url)
# Navigate away and go back to the change form page.
self.selenium.find_element_by_link_text('Home').click()
self.selenium.back()
expected_unselected_values = [
str(self.arthur.id), str(self.bob.id), str(self.cliff.id),
str(self.jason.id), str(self.jenny.id), str(self.john.id),
]
expected_selected_values = [str(self.lisa.id), str(self.peter.id)]
# Check that everything is still in place
self.assertSelectOptions('#id_students_from', expected_unselected_values)
self.assertSelectOptions('#id_students_to', expected_selected_values)
self.assertSelectOptions('#id_alumni_from', expected_unselected_values)
self.assertSelectOptions('#id_alumni_to', expected_selected_values)
def test_refresh_page(self):
"""
Horizontal and vertical filter widgets keep selected options on page
reload (#22955).
"""
self.school.students.add(self.arthur, self.jason)
self.school.alumni.add(self.arthur, self.jason)
self.admin_login(username='super', password='secret', login_url='/')
change_url = reverse('admin:admin_widgets_school_change', args=(self.school.id,))
self.selenium.get(self.live_server_url + change_url)
options_len = len(self.selenium.find_elements_by_css_selector('#id_students_to > option'))
self.assertEqual(options_len, 2)
        # self.selenium.refresh() or send_keys(Keys.F5) does a hard reload and
# doesn't replicate what happens when a user clicks the browser's
# 'Refresh' button.
self.selenium.execute_script("location.reload()")
self.wait_page_loaded()
options_len = len(self.selenium.find_elements_by_css_selector('#id_students_to > option'))
self.assertEqual(options_len, 2)
class HorizontalVerticalFilterSeleniumChromeTests(HorizontalVerticalFilterSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class HorizontalVerticalFilterSeleniumIETests(HorizontalVerticalFilterSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class AdminRawIdWidgetSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
super(AdminRawIdWidgetSeleniumFirefoxTests, self).setUp()
models.Band.objects.create(id=42, name='Bogey Blues')
models.Band.objects.create(id=98, name='Green Potatoes')
def test_ForeignKey(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, reverse('admin:admin_widgets_event_add')))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(
self.selenium.find_element_by_id('id_main_band').get_attribute('value'),
'')
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_main_band')
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_main_band')
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the other selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '98')
def test_many_to_many(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, reverse('admin:admin_widgets_event_add')))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(
self.selenium.find_element_by_id('id_supporting_bands').get_attribute('value'),
'')
# Help text for the field is displayed
self.assertEqual(
self.selenium.find_element_by_css_selector('.field-supporting_bands p.help').text,
'Supporting Bands.'
)
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_supporting_bands')
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_supporting_bands')
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the two selected bands' ids
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42,98')
class AdminRawIdWidgetSeleniumChromeTests(AdminRawIdWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class AdminRawIdWidgetSeleniumIETests(AdminRawIdWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class RelatedFieldWidgetSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_ForeignKey_using_to_field(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get('%s%s' % (
self.live_server_url,
reverse('admin:admin_widgets_profile_add')))
main_window = self.selenium.current_window_handle
        # Click the Add User button to add a new user
self.selenium.find_element_by_id('add_id_user').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_user')
password_field = self.selenium.find_element_by_id('id_password')
password_field.send_keys('password')
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'newuser'
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
# The field now contains the new user
self.selenium.find_element_by_css_selector('#id_user option[value=newuser]')
# Click the Change User button to change it
self.selenium.find_element_by_id('change_id_user').click()
self.wait_for_popup()
self.selenium.switch_to.window('id_user')
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'changednewuser'
username_field.clear()
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
self.selenium.find_element_by_css_selector('#id_user option[value=changednewuser]')
# Go ahead and submit the form to make sure it works
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.wait_for_text('li.success', 'The profile "changednewuser" was added successfully.')
profiles = models.Profile.objects.all()
self.assertEqual(len(profiles), 1)
self.assertEqual(profiles[0].user.username, username_value)
class RelatedFieldWidgetSeleniumChromeTests(RelatedFieldWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class RelatedFieldWidgetSeleniumIETests(RelatedFieldWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| bsd-3-clause |
home-assistant/home-assistant | homeassistant/components/yeelight/config_flow.py | 1 | 9179 | """Config flow for Yeelight integration."""
import logging
import voluptuous as vol
import yeelight
from homeassistant import config_entries, exceptions
from homeassistant.components.dhcp import IP_ADDRESS
from homeassistant.const import CONF_DEVICE, CONF_HOST, CONF_ID, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import (
CONF_MODE_MUSIC,
CONF_MODEL,
CONF_NIGHTLIGHT_SWITCH,
CONF_NIGHTLIGHT_SWITCH_TYPE,
CONF_SAVE_ON_CHANGE,
CONF_TRANSITION,
DOMAIN,
NIGHTLIGHT_SWITCH_TYPE_LIGHT,
_async_unique_name,
)
MODEL_UNKNOWN = "unknown"
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Yeelight."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Return the options flow."""
return OptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the config flow."""
self._discovered_devices = {}
self._discovered_model = None
self._discovered_ip = None
async def async_step_homekit(self, discovery_info):
"""Handle discovery from homekit."""
self._discovered_ip = discovery_info["host"]
return await self._async_handle_discovery()
async def async_step_dhcp(self, discovery_info):
"""Handle discovery from dhcp."""
self._discovered_ip = discovery_info[IP_ADDRESS]
return await self._async_handle_discovery()
async def _async_handle_discovery(self):
"""Handle any discovery."""
self.context[CONF_HOST] = self._discovered_ip
for progress in self._async_in_progress():
if progress.get("context", {}).get(CONF_HOST) == self._discovered_ip:
return self.async_abort(reason="already_in_progress")
try:
self._discovered_model = await self._async_try_connect(self._discovered_ip)
except CannotConnect:
return self.async_abort(reason="cannot_connect")
if not self.unique_id:
return self.async_abort(reason="cannot_connect")
self._abort_if_unique_id_configured(
updates={CONF_HOST: self._discovered_ip}, reload_on_update=False
)
return await self.async_step_discovery_confirm()
async def async_step_discovery_confirm(self, user_input=None):
"""Confirm discovery."""
if user_input is not None:
return self.async_create_entry(
title=f"{self._discovered_model} {self.unique_id}",
data={CONF_ID: self.unique_id, CONF_HOST: self._discovered_ip},
)
self._set_confirm_only()
placeholders = {
"model": self._discovered_model,
"host": self._discovered_ip,
}
self.context["title_placeholders"] = placeholders
return self.async_show_form(
step_id="discovery_confirm", description_placeholders=placeholders
)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
if not user_input.get(CONF_HOST):
return await self.async_step_pick_device()
try:
model = await self._async_try_connect(user_input[CONF_HOST])
except CannotConnect:
errors["base"] = "cannot_connect"
else:
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=f"{model} {self.unique_id}",
data=user_input,
)
user_input = user_input or {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{vol.Optional(CONF_HOST, default=user_input.get(CONF_HOST, "")): str}
),
errors=errors,
)
async def async_step_pick_device(self, user_input=None):
"""Handle the step to pick discovered device."""
if user_input is not None:
unique_id = user_input[CONF_DEVICE]
capabilities = self._discovered_devices[unique_id]
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=_async_unique_name(capabilities),
data={CONF_ID: unique_id},
)
configured_devices = {
entry.data[CONF_ID]
for entry in self._async_current_entries()
if entry.data[CONF_ID]
}
devices_name = {}
# Run 3 times as packets can get lost
for _ in range(3):
devices = await self.hass.async_add_executor_job(yeelight.discover_bulbs)
for device in devices:
capabilities = device["capabilities"]
unique_id = capabilities["id"]
if unique_id in configured_devices:
continue # ignore configured devices
model = capabilities["model"]
host = device["ip"]
name = f"{host} {model} {unique_id}"
self._discovered_devices[unique_id] = capabilities
devices_name[unique_id] = name
# Check if there is at least one device
if not devices_name:
return self.async_abort(reason="no_devices_found")
return self.async_show_form(
step_id="pick_device",
data_schema=vol.Schema({vol.Required(CONF_DEVICE): vol.In(devices_name)}),
)
async def async_step_import(self, user_input=None):
"""Handle import step."""
host = user_input[CONF_HOST]
try:
await self._async_try_connect(host)
except CannotConnect:
_LOGGER.error("Failed to import %s: cannot connect", host)
return self.async_abort(reason="cannot_connect")
if CONF_NIGHTLIGHT_SWITCH_TYPE in user_input:
user_input[CONF_NIGHTLIGHT_SWITCH] = (
user_input.pop(CONF_NIGHTLIGHT_SWITCH_TYPE)
== NIGHTLIGHT_SWITCH_TYPE_LIGHT
)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=user_input[CONF_NAME], data=user_input)
async def _async_try_connect(self, host):
"""Set up with options."""
self._async_abort_entries_match({CONF_HOST: host})
bulb = yeelight.Bulb(host)
try:
capabilities = await self.hass.async_add_executor_job(bulb.get_capabilities)
if capabilities is None: # timeout
_LOGGER.debug("Failed to get capabilities from %s: timeout", host)
else:
_LOGGER.debug("Get capabilities: %s", capabilities)
await self.async_set_unique_id(capabilities["id"])
return capabilities["model"]
except OSError as err:
_LOGGER.debug("Failed to get capabilities from %s: %s", host, err)
# Ignore the error since get_capabilities uses UDP discovery packet
# which does not work in all network environments
# Fallback to get properties
try:
await self.hass.async_add_executor_job(bulb.get_properties)
except yeelight.BulbException as err:
_LOGGER.error("Failed to get properties from %s: %s", host, err)
raise CannotConnect from err
_LOGGER.debug("Get properties: %s", bulb.last_properties)
return MODEL_UNKNOWN
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for Yeelight."""
def __init__(self, config_entry):
"""Initialize the option flow."""
self._config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle the initial step."""
if user_input is not None:
options = {**self._config_entry.options}
options.update(user_input)
return self.async_create_entry(title="", data=options)
options = self._config_entry.options
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(CONF_MODEL, default=options[CONF_MODEL]): str,
vol.Required(
CONF_TRANSITION,
default=options[CONF_TRANSITION],
): cv.positive_int,
vol.Required(
CONF_MODE_MUSIC, default=options[CONF_MODE_MUSIC]
): bool,
vol.Required(
CONF_SAVE_ON_CHANGE,
default=options[CONF_SAVE_ON_CHANGE],
): bool,
vol.Required(
CONF_NIGHTLIGHT_SWITCH,
default=options[CONF_NIGHTLIGHT_SWITCH],
): bool,
}
),
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
| apache-2.0 |
GeographicaGS/moocng | moocng/courses/migrations/0015_calculate_stats.py | 2 | 14058 | # -*- coding: utf-8 -*-
# Copyright 2013 UNED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from south.v2 import DataMigration
from moocng.mongodb import get_db
from moocng.portal.stats import calculate_all_stats
class Migration(DataMigration):
def forwards(self, orm):
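        # ``step`` names the phase reported by calculate_all_stats
        # ('calculating' or 'storing'); progress is printed roughly once per
        # percent of ``total``.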
def callback(step='', counter=0, total=0):
            interval = max(1, int(total / 100))
            if counter % interval == 0:
                if step == 'calculating':
                    print 'Processed %d of %d users' % (counter, total)
                elif step == 'storing':
                    print 'Saved %d of %d statistics entries' % (counter, total)
calculate_all_stats(
user_objects=orm['auth.user'].objects,
kq_objects=orm['courses.knowledgequantum'].objects,
callback=callback)
def backwards(self, orm):
connector = get_db()
db = connector.get_database()
db.stats_course.drop()
db.stats_unit.drop()
db.stats_kq.drop()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'badges.badge': {
'Meta': {'ordering': "['-modified', '-created']", 'object_name': 'Badge'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'courses.announcement': {
'Meta': {'object_name': 'Announcement'},
'content': ('tinymce.models.HTMLField', [], {}),
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']"}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'courses.attachment': {
'Meta': {'object_name': 'Attachment'},
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']"})
},
'courses.course': {
'Meta': {'ordering': "['order']", 'object_name': 'Course'},
'certification_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'certification_banner': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'completion_badge': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'course'", 'unique': 'True', 'null': 'True', 'to': "orm['badges.Badge']"}),
'description': ('tinymce.models.HTMLField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'enrollment_method': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '200'}),
'estimated_effort': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intended_audience': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'learning_goals': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'max_reservations_pending': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
'max_reservations_total': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'courses_as_owner'", 'to': "orm['auth.User']"}),
'promotion_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'promotion_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'requirements': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'courses_as_student'", 'blank': 'True', 'to': "orm['auth.User']"}),
'teachers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'courses_as_teacher'", 'symmetrical': 'False', 'through': "orm['courses.CourseTeacher']", 'to': "orm['auth.User']"}),
'threshold': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'})
},
'courses.courseteacher': {
'Meta': {'ordering': "['order']", 'object_name': 'CourseTeacher'},
'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'courses.knowledgequantum': {
'Meta': {'ordering': "['order']", 'object_name': 'KnowledgeQuantum'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'supplementary_material': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'teacher_comments': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'unit': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Unit']"}),
'weight': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'courses.option': {
'Meta': {'object_name': 'Option'},
'feedback': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'optiontype': ('django.db.models.fields.CharField', [], {'default': "'t'", 'max_length': '1'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Question']"}),
'solution': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '100'}),
'x': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'y': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'courses.question': {
'Meta': {'object_name': 'Question'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']", 'unique': 'True'}),
'last_frame': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'solution_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'solution_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'solution_text': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'use_last_frame': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'courses.unit': {
'Meta': {'ordering': "['order']", 'object_name': 'Unit'},
'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'unittype': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'weight': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
}
}
complete_apps = ['courses']
symmetrical = True
| apache-2.0 |
aerickson/ansible | test/units/plugins/cache/test_cache.py | 91 | 4078 | # (c) 2012-2015, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest, mock
from ansible.errors import AnsibleError
from ansible.plugins.cache import FactCache
from ansible.plugins.cache.base import BaseCacheModule
from ansible.plugins.cache.memory import CacheModule as MemoryCache
HAVE_MEMCACHED = True
try:
import memcache
except ImportError:
HAVE_MEMCACHED = False
else:
    # Use an else so that the only reason we skip this is lack of
    # memcached, not errors importing the plugin
from ansible.plugins.cache.memcached import CacheModule as MemcachedCache
HAVE_REDIS = True
try:
import redis
except ImportError:
HAVE_REDIS = False
else:
from ansible.plugins.cache.redis import CacheModule as RedisCache
class TestFactCache(unittest.TestCase):
def setUp(self):
with mock.patch('ansible.constants.CACHE_PLUGIN', 'memory'):
self.cache = FactCache()
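        # FactCache wraps the configured cache plugin ('memory' here) behind a
        # dict-like interface, which the tests below rely on.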
def test_copy(self):
self.cache['avocado'] = 'fruit'
self.cache['daisy'] = 'flower'
a_copy = self.cache.copy()
self.assertEqual(type(a_copy), dict)
self.assertEqual(a_copy, dict(avocado='fruit', daisy='flower'))
def test_plugin_load_failure(self):
# See https://github.com/ansible/ansible/issues/18751
# Note no fact_connection config set, so this will fail
with mock.patch('ansible.constants.CACHE_PLUGIN', 'json'):
self.assertRaisesRegexp(AnsibleError,
"Unable to load the facts cache plugin.*json.*",
FactCache)
class TestAbstractClass(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_subclass_error(self):
class CacheModule1(BaseCacheModule):
pass
with self.assertRaises(TypeError):
CacheModule1()
class CacheModule2(BaseCacheModule):
def get(self, key):
super(CacheModule2, self).get(key)
with self.assertRaises(TypeError):
CacheModule2()
def test_subclass_success(self):
class CacheModule3(BaseCacheModule):
def get(self, key):
super(CacheModule3, self).get(key)
def set(self, key, value):
super(CacheModule3, self).set(key, value)
def keys(self):
super(CacheModule3, self).keys()
def contains(self, key):
super(CacheModule3, self).contains(key)
def delete(self, key):
super(CacheModule3, self).delete(key)
def flush(self):
super(CacheModule3, self).flush()
def copy(self):
super(CacheModule3, self).copy()
self.assertIsInstance(CacheModule3(), CacheModule3)
@unittest.skipUnless(HAVE_MEMCACHED, 'python-memcached module not installed')
def test_memcached_cachemodule(self):
self.assertIsInstance(MemcachedCache(), MemcachedCache)
def test_memory_cachemodule(self):
self.assertIsInstance(MemoryCache(), MemoryCache)
@unittest.skipUnless(HAVE_REDIS, 'Redis python module not installed')
def test_redis_cachemodule(self):
self.assertIsInstance(RedisCache(), RedisCache)
| gpl-3.0 |
j5shi/ST3_Config | Packages/Anaconda/commands/mccabe.py | 6 | 2445 |
# Copyright (C) 2013 - Oscar Campos <[email protected]>
# This program is Free Software see LICENSE file for details
import sublime
import sublime_plugin
from ..anaconda_lib.worker import Worker
from ..anaconda_lib.callback import Callback
from ..anaconda_lib.helpers import get_settings, active_view
class AnacondaMcCabe(sublime_plugin.WindowCommand):
"""Execute McCabe complexity checker
"""
def run(self):
view = self.window.active_view()
code = view.substr(sublime.Region(0, view.size()))
data = {
'code': code,
'threshold': get_settings(view, 'mccabe_threshold', 7),
'filename': active_view().file_name(),
'method': 'mccabe',
'handler': 'qa'
}
Worker().execute(Callback(on_success=self.prepare_data), **data)
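        # The check runs asynchronously in the anaconda worker; the callback
        # receives a dict like {'success': bool, 'errors': [...]}.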
def is_enabled(self):
"""Determine if this command is enabled or not
"""
view = self.window.active_view()
location = view.sel()[0].begin()
matcher = 'source.python'
return view.match_selector(location, matcher)
def prepare_data(self, data):
"""Prepare the data to present in the quick panel
"""
if not data['success'] or data['errors'] is None:
sublime.status_message('Unable to run McCabe checker...')
return
        if len(data['errors']) == 0:
            view = self.window.active_view()
            threshold = get_settings(view, 'mccabe_threshold', 7)
            sublime.status_message(
                'No code complexity beyond {} was found'.format(threshold)
            )
            return
        self._show_options(data['errors'])
def _show_options(self, options):
"""Show a dropdown quickpanel with options to jump
"""
self.options = []
for option in options:
self.options.append(
[option['message'], 'line: {}'.format(option['line'])]
)
self.window.show_quick_panel(self.options, self._jump)
def _jump(self, item):
"""Jump to a line in the view buffer
"""
if item == -1:
return
lineno = int(self.options[item][1].split(':')[1].strip()) - 1
pt = self.window.active_view().text_point(lineno, 0)
self.window.active_view().sel().clear()
self.window.active_view().sel().add(sublime.Region(pt))
self.window.active_view().show(pt)
| mit |
Noviat/odoo | addons/hr_recruitment/wizard/hr_recruitment_create_partner_job.py | 337 | 3434 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_recruitment_partner_create(osv.osv_memory):
_name = 'hr.recruitment.partner.create'
_description = 'Create Partner from job application'
_columns = {
'close': fields.boolean('Close job request'),
}
def view_init(self, cr, uid, fields_list, context=None):
case_obj = self.pool.get('hr.applicant')
if context is None:
context = {}
for case in case_obj.browse(cr, uid, context['active_ids'], context=context):
if case.partner_id:
raise osv.except_osv(_('Error!'),
_('A contact is already defined on this job request.'))
pass
def make_order(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
partner_obj = self.pool.get('res.partner')
case_obj = self.pool.get('hr.applicant')
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
result = mod_obj._get_id(cr, uid, 'base', 'view_res_partner_filter')
res = mod_obj.read(cr, uid, result, ['res_id'], context=context)
for case in case_obj.browse(cr, uid, context['active_ids'], context=context):
partner_id = partner_obj.search(cr, uid, [('name', '=', case.partner_name or case.name)], context=context)
if partner_id:
                raise osv.except_osv(_('Error!'), _('A contact with the same name already exists.'))
partner_id = partner_obj.create(cr, uid, {
'name': case.partner_name or case.name,
'user_id': case.user_id.id,
'comment': case.description,
'phone': case.partner_phone,
'mobile': case.partner_mobile,
'email': case.email_from
}, context=context)
case_obj.write(cr, uid, [case.id], {
'partner_id': partner_id,
}, context=context)
return {
'domain': "[]",
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'res.partner',
'res_id': int(partner_id),
'view_id': False,
'type': 'ir.actions.act_window',
'search_view_id': res['res_id']
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
RobertWWong/WebDev | djangoApp/ENV/lib/python3.5/site-packages/django/contrib/gis/serializers/geojson.py | 36 | 2813 | from __future__ import unicode_literals
from django.contrib.gis.gdal import CoordTransform, SpatialReference
from django.core.serializers.base import SerializerDoesNotExist
from django.core.serializers.json import Serializer as JSONSerializer
class Serializer(JSONSerializer):
"""
Convert a queryset to GeoJSON, http://geojson.org/
"""
def _init_options(self):
super(Serializer, self)._init_options()
self.geometry_field = self.json_kwargs.pop('geometry_field', None)
self.srid = self.json_kwargs.pop('srid', 4326)
if (self.selected_fields is not None and self.geometry_field is not None and
self.geometry_field not in self.selected_fields):
self.selected_fields = list(self.selected_fields) + [self.geometry_field]
def start_serialization(self):
self._init_options()
        self._cts = {}  # cache of CoordTransform objects
self.stream.write(
'{"type": "FeatureCollection", "crs": {"type": "name", "properties": {"name": "EPSG:%d"}},'
' "features": [' % self.srid)
def end_serialization(self):
self.stream.write(']}')
def start_object(self, obj):
super(Serializer, self).start_object(obj)
self._geometry = None
if self.geometry_field is None:
# Find the first declared geometry field
for field in obj._meta.fields:
if hasattr(field, 'geom_type'):
self.geometry_field = field.name
break
def get_dump_object(self, obj):
data = {
"type": "Feature",
"properties": self._current,
}
if ((self.selected_fields is None or 'pk' in self.selected_fields) and
'pk' not in data["properties"]):
data["properties"]["pk"] = obj._meta.pk.value_to_string(obj)
if self._geometry:
if self._geometry.srid != self.srid:
# If needed, transform the geometry in the srid of the global geojson srid
if self._geometry.srid not in self._cts:
srs = SpatialReference(self.srid)
self._cts[self._geometry.srid] = CoordTransform(self._geometry.srs, srs)
self._geometry.transform(self._cts[self._geometry.srid])
data["geometry"] = eval(self._geometry.geojson)
else:
data["geometry"] = None
return data
def handle_field(self, obj, field):
if field.name == self.geometry_field:
self._geometry = field.value_from_object(obj)
else:
super(Serializer, self).handle_field(obj, field)
class Deserializer(object):
def __init__(self, *args, **kwargs):
raise SerializerDoesNotExist("geojson is a serialization-only serializer")
| mit |
wenqf11/huhamhire-hosts | util/retrievedata.py | 24 | 10221 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# retrievedata.py : Retrieve data from the hosts data file.
#
# Copyleft (C) 2014 - huhamhire hosts team <[email protected]>
# =====================================================================
# Licensed under the GNU General Public License, version 3. You should
# have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING
# THE WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE.
# =====================================================================
__author__ = "huhamhire <[email protected]>"
import os
import sqlite3
import zipfile
DATAFILE = "./hostslist.data"
DATABASE = "./hostslist.s3db"
class RetrieveData(object):
"""
RetrieveData class contains a set of tools to retrieve information from
the hosts data file.
.. note:: All methods from this class are declared as `classmethod`.
:ivar sqlite3.connect conn: An instance of :class:`sqlite3.connect`
object to set up connection to a SQLite database.
:ivar sqlite3.connect.cursor _cur: An instance of
:class:`sqlite3.connect.cursor` object to process SQL queries in the
database.
    :ivar str _database: Filename of a SQLite database file.
"""
conn = None
_cur = None
_database = None
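    # Minimal usage sketch (paths are the module defaults):
    #   RetrieveData.unpack()        # extract hostslist.s3db from hostslist.data
    #   RetrieveData.connect_db()
    #   info = RetrieveData.get_info()
    #   RetrieveData.clear()         # close the connection, delete the database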
@classmethod
def db_exists(cls, database=DATABASE):
"""
Check whether the :attr:`database` file exists or not.
.. note:: This is a `classmethod`.
:param database: Path to a SQLite database file.
`./hostslist.s3db` by default.
:type database: str
:return: A flag indicating whether the database file exists or not.
:rtype: bool
"""
return os.path.isfile(database)
@classmethod
def connect_db(cls, database=DATABASE):
"""
Set up connection with a SQLite :attr:`database`.
.. note:: This is a `classmethod`.
:param database: Path to a SQLite database file.
`./hostslist.s3db` by default.
:type database: str
"""
cls.conn = sqlite3.connect(database)
cls._cur = cls.conn.cursor()
cls._database = database
@classmethod
def disconnect_db(cls):
"""
Close the connection with a SQLite database.
.. note:: This is a `classmethod`.
"""
cls.conn.close()
@classmethod
def get_info(cls):
"""
Retrieve the metadata of current data file.
.. note:: This is a `classmethod`.
:return: Metadata of current data file. The metadata here is a
dictionary while the `Keys` are made of `Section Name` and
`Values` are made of `Information` defined in the hosts data file.
:rtype: dict
"""
cls._cur.execute("SELECT sect, info FROM info;")
info = dict(cls._cur.fetchall())
return info
@classmethod
def get_head(cls):
"""
Retrieve the head information from hosts data file.
.. note:: This is a `classmethod`.
:return: Lines of hosts head information.
:rtype: list
"""
cls._cur.execute("SELECT str FROM hosts_head ORDER BY ln;")
head = cls._cur.fetchall()
return head
@classmethod
def get_ids(cls, id_cfg):
"""
Calculate the id numbers covered by config word :attr:`id_cfg`.
.. note:: This is a `classmethod`.
:param id_cfg: A hexadecimal config word of id selections.
:type id_cfg: int
:return: ID numbers covered by config word.
:rtype: list
"""
cfg = bin(id_cfg)[:1:-1]
ids = []
for i, id_en in enumerate(cfg):
if int(id_en):
ids.append(0b1 << i)
return ids
@classmethod
def get_host(cls, part_id, mod_id):
"""
Retrieve the hosts module specified by :attr:`mod_id` from a part
specified by :attr:`part_id` in the data file.
.. note:: This is a `classmethod`.
:param part_id: ID number of a specified part from the hosts data
file.
.. note:: ID number is usually an 8-bit control byte.
:type part_id: int
:param mod_id: ID number of a specified module from a specified part.
.. note:: ID number is usually an 8-bit control byte.
:type mod_id: int
.. seealso:: :attr:`make_cfg` in
:class:`~tui.curses_d.CursesDaemon` class.
:return: hosts, mod_name
* hosts(`list`): Hosts entries from a specified module.
* mod_name(`str`): Name of a specified module.
:rtype: list, str
"""
if part_id == 0x04:
return None, "customize"
cls._cur.execute("""
SELECT part_name FROM parts
WHERE part_id=:part_id;
""", (part_id, ))
part_name = cls._cur.fetchone()[0]
cls._cur.execute("""
SELECT ip, host FROM %s
WHERE cate=%s;
""" % (part_name, mod_id))
hosts = cls._cur.fetchall()
cls._cur.execute("""
SELECT mod_name FROM modules
WHERE part_id=:part_id AND mod_id=:mod_id;
""", (part_id, mod_id))
mod_name = cls._cur.fetchone()[0]
return hosts, mod_name
@classmethod
def get_choice(cls, flag_v6=False):
"""
Retrieve module selection items from the hosts data file with default
selection for users.
.. note:: This is a `classmethod`.
:param flag_v6: A flag indicating whether to receive IPv6 hosts
entries or the IPv4 ones. Default by `False`.
=============== =======
:attr:`flag_v6` hosts
=============== =======
True IPv6
False IPv4
=============== =======
:type flag_v6: bool
:return: modules, defaults, slices
* modules(`list`): Information of modules for users to select.
* defaults(`dict`): Default selection config for selected parts.
* slices(`list`): Numbers of modules in each part.
:rtype: list, dict, list
"""
ch_parts = (0x08, 0x20 if flag_v6 else 0x10, 0x40)
cls._cur.execute("""
SELECT * FROM modules
WHERE part_id IN (:id_shared, :id_ipv, :id_adblock);
""", ch_parts)
modules = cls._cur.fetchall()
cls._cur.execute("""
SELECT part_id, part_default FROM parts
WHERE part_id IN (:id_shared, :id_ipv, :id_adblock);
""", ch_parts)
default_cfg = cls._cur.fetchall()
defaults = {}
for default in default_cfg:
defaults[default[0]] = cls.get_ids(default[1])
slices = [0]
for ch_part in ch_parts:
cls._cur.execute("""
SELECT COUNT(mod_id) FROM modules
WHERE part_id=:ch_part;
""", (ch_part, ))
slices.append(cls._cur.fetchone()[0])
for s in range(1, len(slices)):
slices[s] += slices[s - 1]
return modules, defaults, slices
@classmethod
def chk_mutex(cls, part_id, mod_cfg):
"""
Check if there is conflict in user selections :attr:`mod_cfg` from a
part specified by :attr:`part_id` in the data file.
        .. note:: A conflict may happen when one selected module is declared
            in the `mutex` word of another module selected at the same time.
.. note:: This is a `classmethod`.
:param part_id: ID number of a specified part from the hosts data
file.
.. note:: ID number is usually an 8-bit control byte.
.. seealso:: :meth:`~util.retrievedata.get_host`.
:type part_id: int
:param mod_cfg: A 16-bit config word indicating module selections of a
specified part.
.. note::
                If the selected modules in the specified part have IDs `0x0002`
                and `0x0010`, the value here should be `0x0002 + 0x0010 = 0x0012`,
which is `0b0000000000000010 + 0b0000000000010000 =
0b0000000000010010` in binary.
:type: int
.. seealso:: :attr:`make_cfg` in
:class:`~tui.curses_d.CursesDaemon` class.
:return: A flag indicating whether there is a conflict or not.
====== ============
Return Status
====== ============
        True   No conflicts
        False  Conflict
====== ============
:rtype: bool
"""
if part_id == 0x04:
return True
cls._cur.execute("""
SELECT mod_id, mutex FROM modules
WHERE part_id=:part_id;
""", (part_id, ))
mutex_tuple = dict(cls._cur.fetchall())
mutex_info = []
mod_info = cls.get_ids(mod_cfg)
for mod_id in mod_info:
mutex_info.extend(cls.get_ids(mutex_tuple[mod_id]))
mutex_info = set(mutex_info)
for mod_id in mod_info:
if mod_id in mutex_info:
return False
return True
@classmethod
def unpack(cls, datafile=DATAFILE, database=DATABASE):
"""
Unzip the archived :attr:`datafile` to a SQLite database file
:attr:`database`.
.. note:: This is a `classmethod`.
:param datafile: Path to the zipped data file. `./hostslist.data` by
default.
:type datafile: str
:param database: Path to a SQLite database file. `./hostslist.s3db` by
default.
:type database: str
"""
datafile = zipfile.ZipFile(datafile, "r")
path, filename = os.path.split(database)
datafile.extract(filename, path)
@classmethod
def clear(cls):
"""
Close connection to the database and delete the database file.
.. note:: This is a `classmethod`.
"""
cls.conn.close()
os.remove(cls._database)
| gpl-3.0 |
CIFASIS/pylearn2 | pylearn2/packaged_dependencies/theano_linear/unshared_conv/localdot.py | 39 | 5044 | """
WRITEME
"""
import logging
from ..linear import LinearTransform
from .unshared_conv import FilterActs, ImgActs
from theano.compat.six.moves import xrange
from theano.sandbox import cuda
if cuda.cuda_available:
import gpu_unshared_conv # register optimizations
import numpy as np
import warnings
try:
import matplotlib.pyplot as plt
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
logger = logging.getLogger(__name__)
class LocalDot(LinearTransform):
"""
    LocalDot is a linear operation computationally similar to
    convolution in the spatial domain, except that whereas convolution
    applies a single filter or set of filters across an image, a
    LocalDot has different filterbanks for different points in the image.
Mathematically, this is a general linear transform except for a
restriction that filters are 0 outside of a spatially localized patch
within the image.
Image shape is 5-tuple:
color_groups
colors_per_group
rows
cols
images
Filterbank shape is 7-tuple (!)
0 row_positions
1 col_positions
2 colors_per_group
3 height
4 width
5 color_groups
6 filters_per_group
    The result of left-multiplication is a 5-tuple with shape:
filter_groups
filters_per_group
row_positions
col_positions
images
Parameters
----------
filters : WRITEME
irows : WRITEME
Image rows
icols : WRITEME
Image columns
subsample : WRITEME
padding_start : WRITEME
filters_shape : WRITEME
message : WRITEME
"""
def __init__(self, filters, irows, icols=None,
subsample=(1, 1),
padding_start=None,
filters_shape=None,
message=""):
LinearTransform.__init__(self, [filters])
self._filters = filters
if filters_shape is None:
self._filters_shape = tuple(filters.get_value(borrow=True).shape)
else:
self._filters_shape = tuple(filters_shape)
self._irows = irows
if icols is None:
self._icols = irows
else:
self._icols = icols
if self._icols != self._irows:
raise NotImplementedError('GPU code at least needs square imgs')
self._subsample = tuple(subsample)
self._padding_start = padding_start
if len(self._filters_shape) != 7:
raise TypeError('need 7-tuple filter shape', self._filters_shape)
if self._subsample[0] != self._subsample[1]:
raise ValueError('subsampling must be same in rows and cols')
self._filter_acts = FilterActs(self._subsample[0])
self._img_acts = ImgActs(module_stride=self._subsample[0])
if message:
self._message = message
else:
self._message = filters.name
def rmul(self, x):
"""
.. todo::
WRITEME
"""
assert x.ndim == 5
return self._filter_acts(x, self._filters)
def rmul_T(self, x):
"""
.. todo::
WRITEME
"""
return self._img_acts(self._filters, x, self._irows, self._icols)
def col_shape(self):
"""
.. todo::
WRITEME
"""
ishape = self.row_shape() + (-99,)
fshape = self._filters_shape
hshape, = self._filter_acts.infer_shape(None, (ishape, fshape))
assert hshape[-1] == -99
return hshape[:-1]
def row_shape(self):
"""
.. todo::
WRITEME
"""
fshape = self._filters_shape
fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2]
fgroups, filters_per_group = fshape[-2:]
return fgroups, fcolors, self._irows, self._icols
def print_status(self):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("TODO: fix dependence on non-existent "
"ndarray_status function")
"""print ndarray_status(
self._filters.get_value(borrow=True),
msg='%s{%s}'% (self.__class__.__name__,
self._message))
"""
def imshow_gray(self):
"""
.. todo::
WRITEME
"""
filters = self._filters.get_value()
modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape
logger.info(filters.shape)
rval = np.zeros((
modR * (rows + 1) - 1,
modC * (cols + 1) - 1,
))
for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)):
for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)):
rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0]
plt.imshow(rval, cmap='gray')
return rval
| bsd-3-clause |
adityaduggal/erpnext | erpnext/patches/v5_0/update_item_description_and_image.py | 102 | 1951 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe.website.utils import find_first_image
from frappe.utils import cstr
import re
def execute():
item_details = frappe._dict()
for d in frappe.db.sql("select name, description_html, description from `tabItem`", as_dict=1):
description = cstr(d.description_html).strip() or cstr(d.description).strip()
image_url, new_desc = extract_image_and_description(description)
item_details.setdefault(d.name, frappe._dict({
"old_description": description,
"new_description": new_desc,
"image_url": image_url
}))
dt_list= ["Purchase Order Item","Supplier Quotation Item", "BOM", "BOM Explosion Item" , \
"BOM Item", "Opportunity Item" , "Quotation Item" , "Sales Order Item" , "Delivery Note Item" , \
"Material Request Item" , "Purchase Receipt Item" , "Stock Entry Detail"]
for dt in dt_list:
frappe.reload_doctype(dt)
records = frappe.db.sql("""select name, `{0}` as item_code, description from `tab{1}`
where description is not null and image is null and description like '%%<img%%'"""
.format("item" if dt=="BOM" else "item_code", dt), as_dict=1)
count = 1
for d in records:
if d.item_code and item_details.get(d.item_code) \
and cstr(d.description) == item_details.get(d.item_code).old_description:
image_url = item_details.get(d.item_code).image_url
desc = item_details.get(d.item_code).new_description
else:
image_url, desc = extract_image_and_description(cstr(d.description))
if image_url:
frappe.db.sql("""update `tab{0}` set description = %s, image = %s
where name = %s """.format(dt), (desc, image_url, d.name))
count += 1
if count % 500 == 0:
frappe.db.commit()
def extract_image_and_description(data):
image_url = find_first_image(data)
desc = re.sub("\<img[^>]+\>", "", data)
return image_url, desc
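# Behaviour sketch (illustrative HTML; find_first_image is assumed to return
# the src of the first <img> tag found in the markup):
#   extract_image_and_description('<img src="/files/x.png"><p>Spare part</p>')
#   -> ("/files/x.png", '<p>Spare part</p>')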
| gpl-3.0 |
doomsterinc/odoo | addons/multi_company/__init__.py | 886 | 1054 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pcamarillor/Exploratory3D | external/gtest-1.5.0/test/gtest_break_on_failure_unittest.py | 1050 | 7214 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
rbdavid/MolecDynamics | Analysis/data_collector.py | 2 | 1272 | #!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# USAGE:
# PREAMBLE:
import numpy as np
import sys
log_file = sys.argv[1]
system = sys.argv[2]
# SUBROUTINES
# MAIN PROGRAM:
lf = open('%s' %(log_file), 'r')
tf = open('%s.time.dat' %(system), 'w')
ef = open('%s.tot_en.dat' %(system), 'w')
pf = open('%s.pot_en.dat' %(system), 'w')
kf = open('%s.kin_en.dat' %(system), 'w')
tempf = open('%s.temp.dat' %(system), 'w')
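# Expected .log line shapes (values illustrative; only the trailing token of
# each matched line is used):
#   Step                        500
#   Total Energy            -1234.56
#   Instantaneous Temp        298.15
# Times are written out as step * 0.002, i.e. a 2 fs (0.002 ps) timestep is
# assumed.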
for line in lf: # iterate over every line in the .log file
	if line.startswith('Step'): # grab the step number for the time conversion
		t = line.split() # split the line into whitespace-separated tokens
step = float(t[-1])
tf.write('%15.6f \n' %(step*0.002))
if line.startswith('Total Energy'):
a = line.split()
en = float(a[-1])
ef.write('%15.6f \n' %(en))
if line.startswith('Total Potential'):
b = line.split()
pot = float(b[-1])
pf.write('%15.6f \n' %(pot))
if line.startswith('Total Kinetic'):
c = line.split()
kin = float(c[-1])
kf.write('%15.6f \n' %(kin))
if line.startswith('Instantaneous'):
d = line.split()
temp = float(d[-1])
tempf.write('%15.6f \n' %(temp))
lf.close()
tf.close()
ef.close()
pf.close()
kf.close()
tempf.close()
| mit |
pschwartz/ansible | v1/ansible/cache/__init__.py | 133 | 1828 | # (c) 2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from collections import MutableMapping
from ansible import utils
from ansible import constants as C
from ansible import errors
class FactCache(MutableMapping):
def __init__(self, *args, **kwargs):
self._plugin = utils.plugins.cache_loader.get(C.CACHE_PLUGIN)
if self._plugin is None:
return
def __getitem__(self, key):
if key not in self:
raise KeyError
return self._plugin.get(key)
def __setitem__(self, key, value):
self._plugin.set(key, value)
def __delitem__(self, key):
self._plugin.delete(key)
def __contains__(self, key):
return self._plugin.contains(key)
def __iter__(self):
return iter(self._plugin.keys())
def __len__(self):
return len(self._plugin.keys())
def copy(self):
""" Return a primitive copy of the keys and values from the cache. """
return dict([(k, v) for (k, v) in self.iteritems()])
def keys(self):
return self._plugin.keys()
def flush(self):
""" Flush the fact cache of all keys. """
self._plugin.flush()
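    # Minimal usage sketch (assumes C.CACHE_PLUGIN resolves to a configured
    # cache plugin, e.g. the in-memory one):
    #   cache = FactCache()
    #   cache['host1'] = {'ansible_os_family': 'Debian'}
    #   cache['host1']  ->  {'ansible_os_family': 'Debian'}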
| gpl-3.0 |
Awingu/open-ovf | py/ovf/env/PlatformSection.py | 1 | 2180 | # vi: ts=4 expandtab syntax=python
##############################################################################
# Copyright (c) 2008 IBM Corporation
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Murillo Fernandes Bernardes (IBM) - initial implementation
##############################################################################
"""OVF Environment PlatformSection representation"""
ORDERED_ELEMENTS = ("Kind", "Version", "Vendor", "Locale", "Timezone")
#:List of ordered elements that can be members of Platform Section.
class PlatformSection(object):
"""OVF Environment PlatformSection class"""
__slots__ = ORDERED_ELEMENTS
    platDict = {}  #: Class-level dict store of the elements (shared across instances).
def __init__(self, data=None):
""" Initialize the PlatformSection."""
if data:
self.update(data)
def update(self, data):
"""Method to update attributes based in the dictionary
passed as argument
@param data: dictionary with new values.
@type data: dict
"""
for key, value in data.iteritems():
setattr(self, key, value)
self.platDict[key] = value
def iteritems(self):
"""Make this object iterable, like in a dict.
The iterator is ordered as required in the schema.
@return: iterable (key, value) pairs
"""
ret = []
for key in self.__slots__:
try:
value = getattr(self, key)
ret.append((key, value))
except AttributeError:
continue
return ret
def __setattr__(self, name, value):
"""Overrides method in order to ensure Timezone's type
as required in the OVF spec.
"""
if name == "Timezone":
try:
int(value)
except ValueError:
raise ValueError("Timezone must be an integer-like value")
object.__setattr__(self, name, value)
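# Usage sketch (field values are illustrative):
#   plat = PlatformSection({"Kind": "ESXi", "Version": "5.0", "Timezone": 600})
#   for key, value in plat.iteritems():
#       ...  # pairs come back in schema order: Kind, Version, Timezone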
| epl-1.0 |
jeandet/meson | mesonbuild/mlog.py | 1 | 5948 | # Copyright 2013-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import io
import sys
import time
import platform
from contextlib import contextmanager
"""This is (mostly) a standalone module used to write logging
information about Meson runs. Some output goes to screen,
some to logging dir and some goes to both."""
def _windows_ansi():
from ctypes import windll, byref
from ctypes.wintypes import DWORD
kernel = windll.kernel32
stdout = kernel.GetStdHandle(-11)
mode = DWORD()
if not kernel.GetConsoleMode(stdout, byref(mode)):
return False
# ENABLE_VIRTUAL_TERMINAL_PROCESSING == 0x4
# If the call to enable VT processing fails (returns 0), we fallback to
# original behavior
return kernel.SetConsoleMode(stdout, mode.value | 0x4) or os.environ.get('ANSICON')
if platform.system().lower() == 'windows':
colorize_console = os.isatty(sys.stdout.fileno()) and _windows_ansi()
else:
colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('TERM') != 'dumb'
log_dir = None
log_file = None
log_fname = 'meson-log.txt'
log_depth = 0
log_timestamp_start = None
def initialize(logdir):
global log_dir, log_file
log_dir = logdir
log_file = open(os.path.join(logdir, log_fname), 'w', encoding='utf8')
def set_timestamp_start(start):
global log_timestamp_start
log_timestamp_start = start
def shutdown():
global log_file
if log_file is not None:
path = log_file.name
exception_around_goer = log_file
log_file = None
exception_around_goer.close()
return path
return None
class AnsiDecorator:
plain_code = "\033[0m"
def __init__(self, text, code, quoted=False):
self.text = text
self.code = code
self.quoted = quoted
def get_text(self, with_codes):
text = self.text
if with_codes:
text = self.code + self.text + AnsiDecorator.plain_code
if self.quoted:
text = '"{}"'.format(text)
return text
def bold(text, quoted=False):
return AnsiDecorator(text, "\033[1m", quoted=quoted)
def red(text):
return AnsiDecorator(text, "\033[1;31m")
def green(text):
return AnsiDecorator(text, "\033[1;32m")
def yellow(text):
return AnsiDecorator(text, "\033[1;33m")
def cyan(text):
return AnsiDecorator(text, "\033[1;36m")
def process_markup(args, keep):
arr = []
if log_timestamp_start is not None:
arr = ['[{:.3f}]'.format(time.monotonic() - log_timestamp_start)]
for arg in args:
if isinstance(arg, str):
arr.append(arg)
elif isinstance(arg, AnsiDecorator):
arr.append(arg.get_text(keep))
else:
arr.append(str(arg))
return arr
def force_print(*args, **kwargs):
iostr = io.StringIO()
kwargs['file'] = iostr
print(*args, **kwargs)
raw = iostr.getvalue()
if log_depth > 0:
prepend = '|' * log_depth
raw = prepend + raw.replace('\n', '\n' + prepend, raw.count('\n') - 1)
# _Something_ is going to get printed.
try:
print(raw, end='')
except UnicodeEncodeError:
cleaned = raw.encode('ascii', 'replace').decode('ascii')
print(cleaned, end='')
def debug(*args, **kwargs):
arr = process_markup(args, False)
if log_file is not None:
print(*arr, file=log_file, **kwargs) # Log file never gets ANSI codes.
log_file.flush()
def log(*args, **kwargs):
arr = process_markup(args, False)
if log_file is not None:
print(*arr, file=log_file, **kwargs) # Log file never gets ANSI codes.
log_file.flush()
if colorize_console:
arr = process_markup(args, True)
force_print(*arr, **kwargs)
def _log_error(severity, *args, **kwargs):
from .mesonlib import get_error_location_string
from .environment import build_filename
if severity == 'warning':
args = (yellow('WARNING:'),) + args
elif severity == 'error':
args = (red('ERROR:'),) + args
elif severity == 'deprecation':
args = (red('DEPRECATION:'),) + args
else:
assert False, 'Invalid severity ' + severity
location = kwargs.pop('location', None)
if location is not None:
location_file = os.path.join(location.subdir, build_filename)
location_str = get_error_location_string(location_file, location.lineno)
args = (location_str,) + args
log(*args, **kwargs)
def error(*args, **kwargs):
return _log_error('error', *args, **kwargs)
def warning(*args, **kwargs):
return _log_error('warning', *args, **kwargs)
def deprecation(*args, **kwargs):
return _log_error('deprecation', *args, **kwargs)
def exception(e):
log()
if hasattr(e, 'file') and hasattr(e, 'lineno') and hasattr(e, 'colno'):
log('%s:%d:%d:' % (e.file, e.lineno, e.colno), red('ERROR: '), e)
else:
log(red('ERROR:'), e)
# Format a list for logging purposes as a string. It separates
# all but the last item with commas, and the last with 'and'.
def format_list(items):
    length = len(items)
    if length > 2:
        return ' and '.join([', '.join(items[:-1]), items[-1]])
    elif length == 2:
        return ' and '.join(items)
    elif length == 1:
        return items[0]
    else:
        return ''
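# Behaviour sketch:
#   format_list(['a'])            -> 'a'
#   format_list(['a', 'b'])       -> 'a and b'
#   format_list(['a', 'b', 'c'])  -> 'a, b and c'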
@contextmanager
def nested():
global log_depth
log_depth += 1
try:
yield
finally:
log_depth -= 1
| apache-2.0 |
Endika/django | django/contrib/sitemaps/__init__.py | 15 | 5550 | from django.apps import apps as django_apps
from django.conf import settings
from django.core import paginator
from django.core.exceptions import ImproperlyConfigured
from django.urls import NoReverseMatch, reverse
from django.utils import translation
from django.utils.six.moves.urllib.parse import urlencode
from django.utils.six.moves.urllib.request import urlopen
PING_URL = "https://www.google.com/webmasters/tools/ping"
class SitemapNotFound(Exception):
pass
def ping_google(sitemap_url=None, ping_url=PING_URL):
"""
Alerts Google that the sitemap for the current site has been updated.
If sitemap_url is provided, it should be an absolute path to the sitemap
for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this
function will attempt to deduce it by using urls.reverse().
"""
if sitemap_url is None:
try:
# First, try to get the "index" sitemap URL.
sitemap_url = reverse('django.contrib.sitemaps.views.index')
except NoReverseMatch:
try:
# Next, try for the "global" sitemap URL.
sitemap_url = reverse('django.contrib.sitemaps.views.sitemap')
except NoReverseMatch:
pass
if sitemap_url is None:
raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.")
if not django_apps.is_installed('django.contrib.sites'):
raise ImproperlyConfigured("ping_google requires django.contrib.sites, which isn't installed.")
Site = django_apps.get_model('sites.Site')
current_site = Site.objects.get_current()
url = "http://%s%s" % (current_site.domain, sitemap_url)
params = urlencode({'sitemap': url})
urlopen("%s?%s" % (ping_url, params))
class Sitemap(object):
# This limit is defined by Google. See the index documentation at
# http://sitemaps.org/protocol.php#index.
limit = 50000
# If protocol is None, the URLs in the sitemap will use the protocol
# with which the sitemap was requested.
protocol = None
def __get(self, name, obj, default=None):
try:
attr = getattr(self, name)
except AttributeError:
return default
if callable(attr):
return attr(obj)
return attr
def items(self):
return []
def location(self, obj):
return obj.get_absolute_url()
def _get_paginator(self):
return paginator.Paginator(self.items(), self.limit)
paginator = property(_get_paginator)
def get_urls(self, page=1, site=None, protocol=None):
# Determine protocol
if self.protocol is not None:
protocol = self.protocol
if protocol is None:
protocol = 'http'
# Determine domain
if site is None:
if django_apps.is_installed('django.contrib.sites'):
Site = django_apps.get_model('sites.Site')
try:
site = Site.objects.get_current()
except Site.DoesNotExist:
pass
if site is None:
raise ImproperlyConfigured(
"To use sitemaps, either enable the sites framework or pass "
"a Site/RequestSite object in your view."
)
domain = site.domain
if getattr(self, 'i18n', False):
urls = []
current_lang_code = translation.get_language()
for lang_code, lang_name in settings.LANGUAGES:
translation.activate(lang_code)
urls += self._urls(page, protocol, domain)
translation.activate(current_lang_code)
else:
urls = self._urls(page, protocol, domain)
return urls
def _urls(self, page, protocol, domain):
urls = []
latest_lastmod = None
all_items_lastmod = True # track if all items have a lastmod
for item in self.paginator.page(page).object_list:
loc = "%s://%s%s" % (protocol, domain, self.__get('location', item))
priority = self.__get('priority', item)
lastmod = self.__get('lastmod', item)
if all_items_lastmod:
all_items_lastmod = lastmod is not None
if (all_items_lastmod and
(latest_lastmod is None or lastmod > latest_lastmod)):
latest_lastmod = lastmod
url_info = {
'item': item,
'location': loc,
'lastmod': lastmod,
'changefreq': self.__get('changefreq', item),
'priority': str(priority if priority is not None else ''),
}
urls.append(url_info)
if all_items_lastmod and latest_lastmod:
self.latest_lastmod = latest_lastmod
return urls
class GenericSitemap(Sitemap):
priority = None
changefreq = None
def __init__(self, info_dict, priority=None, changefreq=None):
self.queryset = info_dict['queryset']
self.date_field = info_dict.get('date_field')
self.priority = priority
self.changefreq = changefreq
def items(self):
# Make sure to return a clone; we don't want premature evaluation.
return self.queryset.filter()
def lastmod(self, item):
if self.date_field is not None:
return getattr(item, self.date_field)
return None
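# Typical wiring sketch (the model and field names are illustrative):
#   info_dict = {'queryset': Article.objects.all(), 'date_field': 'updated'}
#   sitemap = GenericSitemap(info_dict, priority=0.6, changefreq='weekly')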
default_app_config = 'django.contrib.sitemaps.apps.SiteMapsConfig'
| bsd-3-clause |
ARG-TLQ/Red-DiscordBot | redbot/core/errors.py | 3 | 2555 | import importlib.machinery
import discord
from redbot.core.utils.chat_formatting import humanize_number
from .i18n import Translator
_ = Translator(__name__, __file__)
class RedError(Exception):
"""Base error class for Red-related errors."""
class PackageAlreadyLoaded(RedError):
"""Raised when trying to load an already-loaded package."""
def __init__(self, spec: importlib.machinery.ModuleSpec, *args, **kwargs):
super().__init__(*args, **kwargs)
self.spec: importlib.machinery.ModuleSpec = spec
def __str__(self) -> str:
return f"There is already a package named {self.spec.name.split('.')[-1]} loaded"
class CogLoadError(RedError):
"""Raised by a cog when it cannot load itself.
The message will be sent to the user."""
pass
class BankError(RedError):
"""Base error class for bank-related errors."""
class BalanceTooHigh(BankError, OverflowError):
"""Raised when trying to set a user's balance to higher than the maximum."""
def __init__(
self, user: discord.abc.User, max_balance: int, currency_name: str, *args, **kwargs
):
super().__init__(*args, **kwargs)
self.user = user
self.max_balance = max_balance
self.currency_name = currency_name
def __str__(self) -> str:
return _("{user}'s balance cannot rise above {max} {currency}.").format(
user=self.user, max=humanize_number(self.max_balance), currency=self.currency_name
)
class BankPruneError(BankError):
"""Raised when trying to prune a local bank and no server is specified."""
class MissingExtraRequirements(RedError):
"""Raised when an extra requirement is missing but required."""
class ConfigError(RedError):
"""Error in a Config operation."""
class StoredTypeError(ConfigError, TypeError):
"""A TypeError pertaining to stored Config data.
This error may arise when, for example, trying to increment a value
which is not a number, or trying to toggle a value which is not a
boolean.
"""
class CannotSetSubfield(StoredTypeError):
"""Tried to set sub-field of an invalid data structure.
This would occur in the following example::
>>> import asyncio
>>> from redbot.core import Config
>>> config = Config.get_conf(None, 1234, cog_name="Example")
>>> async def example():
... await config.foo.set(True)
... await config.set_raw("foo", "bar", False) # Should raise here
...
>>> asyncio.run(example())
"""
| gpl-3.0 |
chrrrles/ansible | lib/ansible/plugins/action/set_fact.py | 71 | 1710 | # Copyright 2013 Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import iteritems
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.vars import isidentifier
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=dict()):
facts = dict()
if self._task.args:
for (k, v) in iteritems(self._task.args):
k = self._templar.template(k)
if not isidentifier(k):
return dict(failed=True, msg="The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only letters, numbers and underscores." % k)
if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
v = boolean(v)
facts[k] = v
return dict(changed=False, ansible_facts=facts)
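    # Behaviour sketch: given task args {'is_prod': 'yes', 'count': '3'},
    # the returned facts are {'is_prod': True, 'count': '3'} -- only
    # boolean-like strings ('true'/'false'/'yes'/'no') are coerced; other
    # values are kept verbatim.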
| gpl-3.0 |
mariusor/tlcal | plusfw/scraper/html.py | 1 | 5805 | import plusfw
from datetime import datetime
from requests import get, post, codes, exceptions
import json
class Html:
"""
"""
base_uris = {
plusfw.LABEL_PFW: "https://www.plusforward.net",
plusfw.LABEL_QLIVE: "https://www.plusforward.net",
plusfw.LABEL_QIV: "https://www.plusforward.net",
plusfw.LABEL_QIII: "https://www.plusforward.net",
plusfw.LABEL_QII: "https://www.plusforward.net",
plusfw.LABEL_QWORLD: "https://www.plusforward.net",
plusfw.LABEL_DIABOT: "https://www.plusforward.net",
plusfw.LABEL_DOOM: "https://www.plusforward.net",
plusfw.LABEL_REFLEX: "https://www.plusforward.net",
plusfw.LABEL_OWATCH: "https://www.plusforward.net",
plusfw.LABEL_GG: "https://www.plusforward.net",
plusfw.LABEL_UNREAL: "https://www.plusforward.net",
plusfw.LABEL_WARSOW: "https://www.plusforward.net",
plusfw.LABEL_DBMB: "https://www.plusforward.net",
plusfw.LABEL_XONOT: "https://www.plusforward.net",
plusfw.LABEL_QCHAMP: "https://www.plusforward.net",
plusfw.LABEL_QCPMA: "https://www.plusforward.net",
}
calendar_path = {
plusfw.LABEL_PFW: "/calendar/",
plusfw.LABEL_QLIVE: "/calendar/",
plusfw.LABEL_QIV: "/calendar/",
plusfw.LABEL_QIII: "/calendar/",
plusfw.LABEL_QII: "/calendar/",
plusfw.LABEL_QWORLD: "/calendar/",
plusfw.LABEL_DIABOT: "/calendar/",
plusfw.LABEL_DOOM: "/calendar/",
plusfw.LABEL_REFLEX: "/calendar/",
plusfw.LABEL_OWATCH: "/calendar/",
plusfw.LABEL_GG: "/calendar/",
plusfw.LABEL_UNREAL: "/calendar/",
plusfw.LABEL_WARSOW: "/calendar/",
plusfw.LABEL_DBMB: "/calendar/",
plusfw.LABEL_XONOT: "/calendar/",
plusfw.LABEL_QCHAMP: "/calendar/",
plusfw.LABEL_QCPMA: "/calendar/",
}
event_path = {
plusfw.LABEL_PFW: "/calendar/manage/",
plusfw.LABEL_QLIVE: "/calendar/manage/",
plusfw.LABEL_QIV: "/calendar/manage/",
plusfw.LABEL_QIII: "/calendar/manage/",
plusfw.LABEL_QII: "/calendar/manage/",
plusfw.LABEL_QWORLD: "/calendar/manage/",
plusfw.LABEL_DIABOT: "/calendar/manage/",
plusfw.LABEL_DOOM: "/calendar/manage/",
plusfw.LABEL_REFLEX: "/calendar/manage/",
plusfw.LABEL_OWATCH: "/calendar/manage/",
plusfw.LABEL_GG: "/calendar/manage/",
plusfw.LABEL_UNREAL: "/calendar/manage/",
plusfw.LABEL_WARSOW: "/calendar/manage/",
plusfw.LABEL_DBMB: "/calendar/manage/",
plusfw.LABEL_XONOT: "/calendar/manage/",
plusfw.LABEL_QCHAMP: "/calendar/manage/",
plusfw.LABEL_QCPMA: "/calendar/manage/",
}
calendar_type = {
plusfw.LABEL_QLIVE: 3,
plusfw.LABEL_QIV: 4,
plusfw.LABEL_QIII: 5,
plusfw.LABEL_QII: 6,
plusfw.LABEL_QWORLD: 7,
plusfw.LABEL_DIABOT: 8,
plusfw.LABEL_DOOM: 9,
plusfw.LABEL_REFLEX: 10,
plusfw.LABEL_OWATCH: 13,
plusfw.LABEL_GG: 14,
plusfw.LABEL_UNREAL: 15,
plusfw.LABEL_WARSOW: 16,
plusfw.LABEL_DBMB: 17,
plusfw.LABEL_XONOT: 18,
plusfw.LABEL_QCHAMP: 20,
plusfw.LABEL_QCPMA: 21,
}
class UriBuilder:
"""
"""
@staticmethod
def get_uri(calendar=plusfw.LABEL_PFW):
return Html.base_uris[calendar]
@staticmethod
def get_calendar_uri(calendar=plusfw.LABEL_PFW, by_week=True, date=None):
if by_week:
view_by = "week"
else:
view_by = "month"
if date is None:
date = datetime.now()
fmt = date.strftime
url = Html.base_uris[calendar] + Html.calendar_path[calendar] + \
'?view=%s&year=%s&month=%s&day=%s¤t=0' % (view_by, fmt("%Y"), fmt("%m"), fmt("%d"))
return url
@staticmethod
def get_event_uri(calendar=plusfw.LABEL_PFW):
return Html.base_uris[calendar] + Html.event_path[calendar]
@staticmethod
def get_calendar(calendar="+fw", by_week=True, date=None, debug=False):
"""
        :param calendar: calendar label to query (one of the plusfw.LABEL_* values)
        :param by_week: fetch the week view when True, the month view otherwise
        :param date: a date inside the requested week/month; defaults to now
        :param debug: print the calendar URI before fetching
        :return: str
"""
calendar_uri = Html.UriBuilder.get_calendar_uri(calendar, by_week, date)
if debug:
print("Loading calendar from: %s" % calendar_uri)
post_data = None
if calendar in Html.calendar_type:
post_data = {
"cat": str(Html.calendar_type[calendar])
}
try:
if post_data:
tl_response = post(calendar_uri, post_data)
else:
tl_response = get(calendar_uri)
        except exceptions.ConnectionError:
            # Treat connection failures as "no data"; callers expect "".
            return ""
if tl_response.status_code == codes.ok:
return tl_response.content
else:
return ""
@staticmethod
def get_event(calendar=plusfw.LABEL_QCHAMP, event_id=None, debug=True):
if event_id is None:
return ""
if debug:
print(event_id, end=" ", flush=True)
html = ""
event_uri = Html.UriBuilder.get_event_uri(calendar)
post_data = {
"action": "view-event-popup",
"event_id": event_id
}
tl_response = post(event_uri, post_data)
if tl_response.status_code == codes.ok and tl_response.headers.get('content-type') == "application/json":
decoded_response = json.loads(tl_response.content.decode(encoding="UTF-8"))
if "html" in decoded_response:
html = decoded_response["html"]
return html
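    # Usage sketch (network access required; the event id is illustrative):
    #   html = Html.get_calendar(plusfw.LABEL_QCHAMP, by_week=True)
    #   event_html = Html.get_event(plusfw.LABEL_QCHAMP, event_id="12345")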
| mit |
paulhtremblay/boto_emr | boto_emr/get_s3_common_crawl_paths.py | 1 | 1380 | import boto3
import os
def list_objects(bucket_name, prefix):
final = []
session = boto3.Session()
client = session.client('s3')
next_token = 'init'
kwargs = {}
while next_token:
if next_token != 'init':
kwargs.update({'ContinuationToken': next_token})
response = client.list_objects_v2(Bucket=bucket_name, Prefix = prefix,
**kwargs)
next_token = response.get('NextContinuationToken')
for contents in response.get('Contents', []):
key = contents['Key']
yield key
def get_segments(bucket_name = 'commoncrawl', prefix =
'crawl-data/CC-MAIN-2016-50/segments'):
return list_objects(bucket_name = bucket_name, prefix = prefix)
def main():
"""
get one file for testing
"""
gen = list_objects(bucket_name = 'commoncrawl', prefix =
'crawl-data/CC-MAIN-2016-50/segments')
key = next(gen)
s3 = boto3.resource('s3')
bucket = s3.Bucket('commoncrawl')
bucket.download_file(key, '/tmp/warc_ex.gz')
def get_crawl_paths(the_type, recursion_limit = None):
assert the_type in ['crawldiagnostics']
return list(filter(lambda x: os.path.split(os.path.split(x)[0])[1] == the_type,
get_segments()))
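# Usage sketch (requires AWS credentials allowed to list the public
# 'commoncrawl' bucket); each returned key looks roughly like
# 'crawl-data/CC-MAIN-2016-50/segments/<id>/crawldiagnostics/<file>.warc.gz':
#   paths = get_crawl_paths(the_type='crawldiagnostics')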
if __name__ == '__main__':
#main()
l = get_crawl_paths(the_type = 'crawldiagnostics')
print(len(l))
| mit |
frohoff/Empire | lib/modules/python/situational_awareness/network/active_directory/get_domaincontrollers.py | 12 | 4374 | class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Get Domain Controllers',
# list of one or more authors for the module
'Author': ['@424f424f'],
# more verbose multi-line description of the module
'Description': 'This module will list all domain controllers from active directory',
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': ['']
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to run on.',
'Required' : True,
'Value' : ''
},
'LDAPAddress' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'LDAP IP/Hostname',
'Required' : True,
'Value' : ''
},
'BindDN' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : '[email protected]',
'Required' : True,
'Value' : ''
},
'Password' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Password to connect to LDAP',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
LDAPAddress = self.options['LDAPAddress']['Value']
BindDN = self.options['BindDN']['Value']
password = self.options['Password']['Value']
# the Python script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
script = """
import sys, os, subprocess, re
BindDN = "%s"
LDAPAddress = "%s"
password = "%s"
regex = re.compile('.+@([^.]+)\..+')
global tld
match = re.match(regex, BindDN)
tld = match.group(1)
global ext
ext = BindDN.split('.')[1]
cmd = \"""ldapsearch -x -h {} -b "dc={},dc={}" -D {} -w {} "(&(objectcategory=Computer)(userAccountControl:1.2.840.113556.1.4.803:=8192))" ""\".format(LDAPAddress, tld, ext, BindDN, password)
output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
output2 = subprocess.Popen(["grep", "name:"],stdin=output.stdout, stdout=subprocess.PIPE,universal_newlines=True)
output.stdout.close()
out,err = output2.communicate()
print ""
print out
""" % (BindDN, LDAPAddress, password)
return script
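    # For BindDN '[email protected]' the embedded script derives tld='corp'
    # and ext='local', so the generated command is roughly (illustrative):
    #   ldapsearch -x -h <LDAPAddress> -b "dc=corp,dc=local" \
    #     -D [email protected] -w <Password> \
    #     "(&(objectcategory=Computer)(userAccountControl:1.2.840.113556.1.4.803:=8192))"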
| bsd-3-clause |
semplice/alan2 | alan-config.py | 1 | 5617 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# alan2 - An openbox menu builder
# Copyright (C) 2013 Semplice Project
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Authors:
# Eugenio "g7" Paolantonio <[email protected]>
#
import xml.etree.ElementTree as etree
import alan.core.tree_helpers as tree_helpers
import alan.core.config as config
import argparse
import os
import sys
# Create and parse arguments
parser = argparse.ArgumentParser(description="Modifies Openbox configuration to enable/disable alan2 modules.")
parser.add_argument(
"extension",
nargs="?",
help="the extension to process",
)
parser.add_argument(
"-i", "--directory",
help="directory where to look for configuration files (default: ~/.config)",
default="~/.config"
)
parser.add_argument(
"-p", "--profile",
help="the profile to use"
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-e", "--enable",
help="enable the extension",
action="store_true"
)
group.add_argument(
"-d", "--disable",
help="disable the extension",
action="store_true"
)
group.add_argument(
"-l", "--list",
help="list active estensions",
action="store_true"
)
group.add_argument(
"-s", "--setup",
help="setups alan2",
action="store_true"
)
args = parser.parse_args()
if not (args.list or args.setup) and not args.extension:
raise Exception("You must specify an extension!")
## Welcome to alan2!
DIRECTORY = os.path.expanduser(args.directory)
OPENBOX_CONFIGURATION_DIR = os.path.join(DIRECTORY, "openbox")
DEFAULT_PATH=os.path.join(DIRECTORY, "alan-menus")
# Open alan2 configuration
configuration = config.Configuration(args.extension, DIRECTORY, args.profile)
map_as_main = configuration.settings["alan"]["map_as_main"]
if args.extension:
args.extension = args.extension.split(" ")
# Check for map_as_main
if map_as_main in args.extension:
# Found it, ensure we put it at the end
if args.extension.index(map_as_main) != len(args.extension)-1:
# It isn't at the end, remove it and reappend again
args.extension.remove(map_as_main)
args.extension.append(map_as_main)
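	# Example (hypothetical): with map_as_main == 'launcher' and
	# args.extension == ['launcher', 'places'], the list becomes
	# ['places', 'launcher'], so the main module is always processed last.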
# Open openbox configuration
tree = etree.parse(os.path.join(OPENBOX_CONFIGURATION_DIR, "rc.xml"), tree_helpers.PIParser())
document = tree.getroot()
root = document.find("ob:openbox_config", namespaces={"ob":"http://openbox.org/3.5/rc"})
if root is None:
# openbox-3.4
root = document.find("ob3:openbox_config", namespaces={"ob3":"http://openbox.org/3.4/rc"})
namespaces = {"ob":"http://openbox.org/3.4/rc"}
else:
namespaces = {"ob":"http://openbox.org/3.5/rc"}
etree.register_namespace('',namespaces["ob"])
menu = root.find("ob:menu", namespaces=namespaces)
# Get and parse all enabled modules...
ENABLED_MODULES = {}
_openbox_menu = []
for child in menu.findall("ob:file", namespaces=namespaces):
if os.path.dirname(child.text) == DEFAULT_PATH:
# it's ours!
ENABLED_MODULES[".".join(os.path.basename(child.text).split(".")[:-1])] = child
elif os.path.basename(child.text) in ("menu.xml","menu-static.xml"):
# Openbox default? Maybe. Cache the child, so that we can use it
# if we are going to setup alan2.
_openbox_menu.append(child)
# What should we do?
if args.setup:
# Setup!
for _menu in _openbox_menu:
# Remove found menus, they will clash with alan2 *for sure*.
menu.remove(_menu)
# If there are some extensions in the arguments, it's a good thing
# to set args.enable to True so that we do not need to recall this
# executable to enable things...
if args.extension:
args.enable = True
if args.list:
# Should do a list!
for module, child in ENABLED_MODULES.items():
print("Extension '%s', cached menu file at %s." % (module, child.text))
sys.exit()
elif args.enable:
# Should enable a module!
for extension in args.extension:
if extension in ENABLED_MODULES:
# Already enabled!
raise Exception("Extension %s already enabled!" % extension)
ext = etree.SubElement(menu, "file")
ext.text = os.path.join(DEFAULT_PATH, "%s.xml" % extension)
elif args.disable:
# Should remove a module!
for extension in args.extension:
if not extension in ENABLED_MODULES:
# Not found :(
raise Exception("Extension %s is not currently enabled!" % extension)
elif extension == map_as_main:
# Removing the main module would be a kind of pointless...
raise Exception("Extension %s is currently mapped as the main extension in alan2. Please change alan2's main module in order to disable this." % extension)
menu.remove(ENABLED_MODULES[extension])
del ENABLED_MODULES[extension]
# This is not a great thing to do, but we must: ensure we have the module
# mapped as main at the end of the subsection.
if map_as_main in ENABLED_MODULES:
# Ok, it's there, and it hasn't be touched (likely it is not the last anymore)
menu.remove(ENABLED_MODULES[map_as_main])
menu.append(ENABLED_MODULES[map_as_main])
tree_helpers.indent(root)
tree._setroot(root) # fixme?
tree.write(os.path.join(OPENBOX_CONFIGURATION_DIR, "rc.xml"),
xml_declaration=True,
encoding="utf-8",
method="xml"
)
| gpl-3.0 |
llamberson/git-cola | cola/widgets/highlighter.py | 11 | 3404 | from __future__ import division, absolute_import, unicode_literals
from PyQt4 import QtCore, QtGui
from cola.compat import ustr
have_pygments = True
try:
from pygments.styles import get_style_by_name
from pygments import lex
from pygments.util import ClassNotFound
from pygments.lexers import get_lexer_for_filename
except ImportError:
have_pygments = False
def highlight_document(edit, filename):
doc = edit.document()
if not have_pygments:
return
try:
lexer = get_lexer_for_filename(filename, stripnl=False)
except ClassNotFound:
return
style = get_style_by_name("default")
font = doc.defaultFont()
base_format = QtGui.QTextCharFormat()
base_format.setFont(font)
token_formats = {}
window = edit.window()
if hasattr(window, "processEvents"):
processEvents = window.processEvents
else:
processEvents = QtCore.QCoreApplication.processEvents
def get_token_format(token):
if token in token_formats:
return token_formats[token]
if token.parent:
parent_format = get_token_format(token.parent)
else:
parent_format = base_format
format = QtGui.QTextCharFormat(parent_format)
font = format.font()
if style.styles_token(token):
tstyle = style.style_for_token(token)
if tstyle['color']:
                format.setForeground(QtGui.QColor("#" + tstyle['color']))
            if tstyle['bold']:
                font.setWeight(QtGui.QFont.Bold)
            if tstyle['italic']:
                font.setItalic(True)
            if tstyle['underline']:
                format.setFontUnderline(True)
            if tstyle['bgcolor']:
                format.setBackground(QtGui.QColor("#" + tstyle['bgcolor']))
# No way to set this for a QTextCharFormat
#if tstyle['border']: format.
token_formats[token] = format
return format
text = ustr(doc.toPlainText())
block_count = 0
block = doc.firstBlock()
assert(isinstance(block, QtGui.QTextBlock))
block_pos = 0
block_len = block.length()
block_formats = []
for token, ttext in lex(text, lexer):
format_len = len(ttext)
format = get_token_format(token)
while format_len > 0:
format_range = QtGui.QTextLayout.FormatRange()
format_range.start = block_pos
format_range.length = min(format_len, block_len)
format_range.format = format
block_formats.append(format_range)
block_len -= format_range.length
format_len -= format_range.length
block_pos += format_range.length
if block_len == 0:
block.layout().setAdditionalFormats(block_formats)
doc.markContentsDirty(block.position(), block.length())
block = block.next()
block_pos = 0
block_len = block.length()
block_formats = []
block_count += 1
if block_count % 100 == 0:
processEvents()
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
python = QtGui.QPlainTextEdit()
f = open(__file__, 'r')
python.setPlainText(f.read())
f.close()
python.setWindowTitle('python')
python.show()
highlight_document(python, __file__)
sys.exit(app.exec_())
| gpl-2.0 |
eharney/nova | nova/objects/utils.py | 14 | 4736 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for objects"""
import datetime
import iso8601
import netaddr
import six
from nova.network import model as network_model
from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
def datetime_or_none(dt):
"""Validate a datetime or None value."""
if dt is None:
return None
elif isinstance(dt, datetime.datetime):
if dt.utcoffset() is None:
# NOTE(danms): Legacy objects from sqlalchemy are stored in UTC,
# but are returned without a timezone attached.
# As a transitional aid, assume a tz-naive object is in UTC.
return dt.replace(tzinfo=iso8601.iso8601.Utc())
else:
return dt
raise ValueError('A datetime.datetime is required here')
# NOTE(danms): Being tolerant of isotime strings here will help us
# during our objects transition
def datetime_or_str_or_none(val):
if isinstance(val, six.string_types):
return timeutils.parse_isotime(val)
return datetime_or_none(val)
def int_or_none(val):
"""Attempt to parse an integer value, or None."""
if val is None:
return val
else:
return int(val)
def str_value(val):
if val is None:
raise ValueError(_('None is not valid here'))
return unicode(val)
def str_or_none(val):
"""Attempt to stringify a value, or None."""
if val is None:
return val
else:
return str_value(val)
def cstring(val):
if val is None:
raise ValueError(_('None is not valid here'))
return str(val)
def ip_or_none(version):
"""Return a version-specific IP address validator."""
def validator(val, version=version):
if val is None:
return val
else:
return netaddr.IPAddress(val, version=version)
return validator
def nested_object(objclass, none_ok=True):
def validator(val, objclass=objclass):
if none_ok and val is None:
return val
if isinstance(val, objclass):
return val
raise ValueError('An object of class %s is required here' % objclass)
return validator
def network_model_or_none(val):
"""Validate/Convert to a network_model.NetworkInfo, or None."""
if val is None:
return val
if isinstance(val, network_model.NetworkInfo):
return val
return network_model.NetworkInfo.hydrate(val)
def list_of_strings_or_none(val):
if val is None:
return val
if not isinstance(val, list):
raise ValueError(_('A list of strings is required here'))
if not all([isinstance(x, six.string_types) for x in val]):
raise ValueError(_('Invalid values found in list '
'(strings are required)'))
return val
def dict_of_strings_or_none(val):
if val is None:
return val
if not isinstance(val, dict):
try:
val = dict(val.iteritems())
except Exception:
raise ValueError(_('A dict of strings is required here'))
if not all([isinstance(x, six.string_types) for x in val.keys()]):
raise ValueError(_('Invalid keys found in dict '
'(strings are required)'))
if not all([isinstance(x, six.string_types) for x in val.values()]):
raise ValueError(_('Invalid values found in dict '
'(strings are required)'))
return val
def dt_serializer(name):
"""Return a datetime serializer for a named attribute."""
def serializer(self, name=name):
if getattr(self, name) is not None:
return timeutils.isotime(getattr(self, name))
else:
return None
return serializer
def dt_deserializer(instance, val):
"""A deserializer method for datetime attributes."""
if val is None:
return None
else:
return timeutils.parse_isotime(val)
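# Wiring sketch (the object class is hypothetical): attach the generated
# method to a class so `obj.serialize_launched_at()` emits an ISO 8601
# string, which dt_deserializer parses back into a datetime:
#   class MyObj(object):
#       launched_at = None
#       serialize_launched_at = dt_serializer('launched_at')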
def obj_serializer(name):
def serializer(self, name=name):
if getattr(self, name) is not None:
return getattr(self, name).obj_to_primitive()
else:
return None
return serializer
| apache-2.0 |
jhg/django | tests/gis_tests/inspectapp/tests.py | 17 | 8167 | from __future__ import unicode_literals
import os
import re
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.core.management import call_command
from django.db import connection, connections
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import modify_settings
from django.utils.six import StringIO
from ..test_data import TEST_DATA
if HAS_GDAL:
from django.contrib.gis.gdal import Driver, GDALException
from django.contrib.gis.utils.ogrinspect import ogrinspect
from .models import AllOGRFields
@skipUnless(HAS_GDAL, "InspectDbTests needs GDAL support")
class InspectDbTests(TestCase):
@skipUnlessDBFeature("gis_enabled")
def test_geom_columns(self):
"""
Test the geo-enabled inspectdb command.
"""
out = StringIO()
call_command(
'inspectdb',
table_name_filter=lambda tn: tn == 'inspectapp_allogrfields',
stdout=out
)
output = out.getvalue()
if connection.features.supports_geometry_field_introspection:
self.assertIn('geom = models.PolygonField()', output)
self.assertIn('point = models.PointField()', output)
else:
self.assertIn('geom = models.GeometryField(', output)
self.assertIn('point = models.GeometryField(', output)
self.assertIn('objects = models.GeoManager()', output)
@skipUnlessDBFeature("supports_3d_storage")
def test_3d_columns(self):
out = StringIO()
call_command(
'inspectdb',
table_name_filter=lambda tn: tn == 'inspectapp_fields3d',
stdout=out
)
output = out.getvalue()
if connection.features.supports_geometry_field_introspection:
self.assertIn('point = models.PointField(dim=3)', output)
self.assertIn('line = models.LineStringField(dim=3)', output)
self.assertIn('poly = models.PolygonField(dim=3)', output)
else:
self.assertIn('point = models.GeometryField(', output)
self.assertIn('line = models.GeometryField(', output)
self.assertIn('poly = models.GeometryField(', output)
self.assertIn('objects = models.GeoManager()', output)
@skipUnless(HAS_GDAL, "OGRInspectTest needs GDAL support")
@modify_settings(
INSTALLED_APPS={'append': 'django.contrib.gis'},
)
class OGRInspectTest(TestCase):
maxDiff = 1024
def test_poly(self):
shp_file = os.path.join(TEST_DATA, 'test_poly', 'test_poly.shp')
model_def = ogrinspect(shp_file, 'MyModel')
expected = [
'# This is an auto-generated Django model module created by ogrinspect.',
'from django.contrib.gis.db import models',
'',
'class MyModel(models.Model):',
' float = models.FloatField()',
' int = models.FloatField()',
' str = models.CharField(max_length=80)',
' geom = models.PolygonField(srid=-1)',
' objects = models.GeoManager()',
]
self.assertEqual(model_def, '\n'.join(expected))
def test_poly_multi(self):
shp_file = os.path.join(TEST_DATA, 'test_poly', 'test_poly.shp')
model_def = ogrinspect(shp_file, 'MyModel', multi_geom=True)
self.assertIn('geom = models.MultiPolygonField(srid=-1)', model_def)
# Same test with a 25D-type geometry field
shp_file = os.path.join(TEST_DATA, 'gas_lines', 'gas_leitung.shp')
model_def = ogrinspect(shp_file, 'MyModel', multi_geom=True)
self.assertIn('geom = models.MultiLineStringField(srid=-1)', model_def)
def test_date_field(self):
shp_file = os.path.join(TEST_DATA, 'cities', 'cities.shp')
model_def = ogrinspect(shp_file, 'City')
expected = [
'# This is an auto-generated Django model module created by ogrinspect.',
'from django.contrib.gis.db import models',
'',
'class City(models.Model):',
' name = models.CharField(max_length=80)',
' population = models.FloatField()',
' density = models.FloatField()',
' created = models.DateField()',
' geom = models.PointField(srid=-1)',
' objects = models.GeoManager()',
]
self.assertEqual(model_def, '\n'.join(expected))
def test_time_field(self):
        # Get the database identifier used by OGR; if None is returned,
        # GDAL does not have the required support compiled in.
ogr_db = get_ogr_db_string()
if not ogr_db:
self.skipTest("Unable to setup an OGR connection to your database")
try:
# Writing shapefiles via GDAL currently does not support writing OGRTime
# fields, so we need to actually use a database
model_def = ogrinspect(ogr_db, 'Measurement',
layer_key=AllOGRFields._meta.db_table,
decimal=['f_decimal'])
except GDALException:
self.skipTest("Unable to setup an OGR connection to your database")
self.assertTrue(model_def.startswith(
'# This is an auto-generated Django model module created by ogrinspect.\n'
'from django.contrib.gis.db import models\n'
'\n'
'class Measurement(models.Model):\n'
))
# The ordering of model fields might vary depending on several factors (version of GDAL, etc.)
self.assertIn(' f_decimal = models.DecimalField(max_digits=0, decimal_places=0)', model_def)
self.assertIn(' f_int = models.IntegerField()', model_def)
self.assertIn(' f_datetime = models.DateTimeField()', model_def)
self.assertIn(' f_time = models.TimeField()', model_def)
self.assertIn(' f_float = models.FloatField()', model_def)
self.assertIn(' f_char = models.CharField(max_length=10)', model_def)
self.assertIn(' f_date = models.DateField()', model_def)
self.assertIsNotNone(re.search(
r' geom = models.PolygonField\(([^\)])*\)\n' # Some backends may have srid=-1
r' objects = models.GeoManager\(\)', model_def))
def test_management_command(self):
shp_file = os.path.join(TEST_DATA, 'cities', 'cities.shp')
out = StringIO()
call_command('ogrinspect', shp_file, 'City', stdout=out)
output = out.getvalue()
self.assertIn('class City(models.Model):', output)
def get_ogr_db_string():
"""
Construct the DB string that GDAL will use to inspect the database.
GDAL will create its own connection to the database, so we re-use the
connection settings from the Django test.
"""
db = connections.databases['default']
# Map from the django backend into the OGR driver name and database identifier
# http://www.gdal.org/ogr/ogr_formats.html
#
# TODO: Support Oracle (OCI).
drivers = {
'django.contrib.gis.db.backends.postgis': ('PostgreSQL', "PG:dbname='%(db_name)s'", ' '),
'django.contrib.gis.db.backends.mysql': ('MySQL', 'MYSQL:"%(db_name)s"', ','),
'django.contrib.gis.db.backends.spatialite': ('SQLite', '%(db_name)s', '')
}
db_engine = db['ENGINE']
if db_engine not in drivers:
return None
drv_name, db_str, param_sep = drivers[db_engine]
# Ensure that GDAL library has driver support for the database.
try:
Driver(drv_name)
    except Exception:
return None
# SQLite/Spatialite in-memory databases
if db['NAME'] == ":memory:":
return None
# Build the params of the OGR database connection string
params = [db_str % {'db_name': db['NAME']}]
def add(key, template):
value = db.get(key, None)
# Don't add the parameter if it is not in django's settings
if value:
params.append(template % value)
add('HOST', "host='%s'")
add('PORT', "port='%s'")
add('USER', "user='%s'")
add('PASSWORD', "password='%s'")
return param_sep.join(params)
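# Illustrative example (editor's sketch; the settings values below are
# hypothetical, normally coming from DATABASES['default']): for a PostGIS
# backend named 'geo' on localhost:5432 with user 'u' and password 'p',
# get_ogr_db_string() would build:
#   "PG:dbname='geo' host='localhost' port='5432' user='u' password='p'"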
| bsd-3-clause |
helenst/django | django/contrib/gis/tests/relatedapp/models.py | 36 | 1892 | from django.contrib.gis.db import models
from django.utils.encoding import python_2_unicode_compatible
class SimpleModel(models.Model):
objects = models.GeoManager()
class Meta:
abstract = True
app_label = 'relatedapp'
@python_2_unicode_compatible
class Location(SimpleModel):
point = models.PointField()
def __str__(self):
return self.point.wkt
@python_2_unicode_compatible
class City(SimpleModel):
name = models.CharField(max_length=50)
state = models.CharField(max_length=2)
location = models.ForeignKey(Location)
def __str__(self):
return self.name
class AugmentedLocation(Location):
extra_text = models.TextField(blank=True)
objects = models.GeoManager()
class Meta:
app_label = 'relatedapp'
class DirectoryEntry(SimpleModel):
listing_text = models.CharField(max_length=50)
location = models.ForeignKey(AugmentedLocation)
@python_2_unicode_compatible
class Parcel(SimpleModel):
name = models.CharField(max_length=30)
city = models.ForeignKey(City)
center1 = models.PointField()
# Throwing a curveball w/`db_column` here.
center2 = models.PointField(srid=2276, db_column='mycenter')
border1 = models.PolygonField()
border2 = models.PolygonField(srid=2276)
def __str__(self):
return self.name
# These use the GeoManager but do not have any geographic fields.
class Author(SimpleModel):
name = models.CharField(max_length=100)
dob = models.DateField()
class Article(SimpleModel):
title = models.CharField(max_length=100)
author = models.ForeignKey(Author, unique=True)
class Book(SimpleModel):
title = models.CharField(max_length=100)
author = models.ForeignKey(Author, related_name='books', null=True)
class Event(SimpleModel):
name = models.CharField(max_length=100)
when = models.DateTimeField()
| bsd-3-clause |
DrKylstein/tiled-vorticons | keenlib/ted15.py | 1 | 3290 | '''
Commander Keen 1-3 level loader/saver
2014 <[email protected]>
'''
from __future__ import print_function
import math
import rlew
import file_gen_util as util
RLEW_FLAG = 0xFEFE
HEADER_FIELDS = ('width', 'height', 'plane_count', 'unk1', 'unk2', 'unk3',
'unk4', 'plane_size', 'unk5', 'unk6', 'unk7', 'unk8', 'unk9',
'unkA', 'unkB', 'unkC')
HEADER_LENGTH = len(HEADER_FIELDS)
def log(words):
for word in words:
print(word)
yield word
def load(file):
    words = rlew.unrlew(util.first(util.uniform_file("<H", open(file, 'rb'))),
                        RLEW_FLAG)
for i in range(2):
words.next()
return _level(words)
def save(file, level):
_regen_header(level)
size = HEADER_LENGTH*2 + level['plane_size']*2
size = [size & 0xFFFF, size >> 16]
compressed = size+[item for item in rlew.rlew(_dump_level(level),
RLEW_FLAG)]
#compressed = size+[item for item in _dump_level(level)]
util.uniform_file_out('<H', open(file, 'wb'), util.tupelize(iter(
compressed)))
def _plane(width, height, remainder, words):
plane = [[words.next() for x in range(width)] for y in range(height)]
for i in range(remainder):
words.next()
return plane
def _regen_header(level):
    if 'plane_size' not in level:
        size = level['width']*level['height']*2
        level['plane_size'] = int(math.ceil(size/16.0)*16)
    if 'plane_count' not in level:
level['plane_count'] = 2
for key in HEADER_FIELDS:
if key not in level:
level[key] = 0
def _level(words):
level = {}
header_words = []
for i in range(HEADER_LENGTH):
header_words.append(words.next())
level.update(dict(zip(HEADER_FIELDS, header_words)))
remainder = _plane_padding(level)
level['tiles'] = _plane(level['width'], level['height'], remainder, words)
level['sprites'] = _plane(level['width'], level['height'], remainder,
words)
return level
def _dump_level(level):
for key in HEADER_FIELDS:
yield level[key]
remainder = _plane_padding(level)
for row in level['tiles']:
for tile in row:
yield tile
for i in range(remainder):
yield 0
for row in level['sprites']:
for sprite in row:
yield sprite
for i in range(remainder):
yield 0
def _plane_padding(level):
return level['plane_size']/2 - level['width']*level['height']
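# Worked example (editor's note, illustrative values): a 15x15 map holds 225
# tiles, i.e. 450 bytes per plane; _regen_header rounds plane_size up to a
# multiple of 16, giving 464 bytes, so _plane_padding returns
# 464/2 - 225 == 7 trailing words of zero padding per plane.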
if __name__ == '__main__':
level = load('LEVEL80.CK1')
for key, item in level.items():
print(key, item)
print(level['width'], level['height'])
for row in level['tiles']:
for tile in row:
print("{:<7}".format(hex(tile)), end=' ')
print()
loop = _level(_dump_level(level))
if loop == level:
print("Great Success!")
else:
print("Fission Mailed!")
print(loop)
print(loop['width'], loop['height'])
for row in loop['tiles']:
for tile in row:
print("{:<7}".format(hex(tile)), end=' ')
print()
save('LEVEL99.CK1', level)
| mit |
gabriel-samfira/jrunner | jrunner/jobqueue/workers/simple.py | 1 | 1445 | # Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jrunner.common.amqpclient.client import Consume
from jrunner.jobqueue.dispatch import dispatch
from jrunner.common import has_gevent_enabled
from jrunner.openstack.common import log as logging
from jrunner.jobqueue.workers import *
from oslo.config import cfg
logging.setup('jrunner')
opts = [
cfg.StrOpt(
'queue',
default='controller',
help='Worker queue'),
]
CONF = cfg.CONF
CONF.register_opts(opts, 'jobqueue')
LOG = logging.getLogger(__name__)
if has_gevent_enabled():
import gevent
sleep = gevent.sleep
else:
import time
sleep = time.sleep
def main():
while True:
try:
c = Consume(BROKER_EXCHANGE, CONF.jobqueue.queue)
c.consume(dispatch)
except Exception as err:
LOG.exception(err)
finally:
sleep(3)
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/jupyter_client/channels.py | 9 | 6729 | """Base classes to manage a Client's interaction with a running kernel"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
import atexit
import errno
from threading import Thread
import time
import zmq
# import ZMQError in top-level namespace, to avoid ugly attribute-error messages
# during garbage collection of threads at exit:
from zmq import ZMQError
from jupyter_client import protocol_version_info
from .channelsabc import HBChannelABC
#-----------------------------------------------------------------------------
# Constants and exceptions
#-----------------------------------------------------------------------------
major_protocol_version = protocol_version_info[0]
class InvalidPortNumber(Exception):
pass
class HBChannel(Thread):
"""The heartbeat channel which monitors the kernel heartbeat.
    Note that this channel does not start paused here (see ``_pause``); once
    it is started, the kernel manager is expected to pause and un-pause it
    as appropriate.
"""
context = None
session = None
socket = None
address = None
_exiting = False
time_to_dead = 1.
poller = None
_running = None
_pause = None
_beating = None
def __init__(self, context=None, session=None, address=None):
"""Create the heartbeat monitor thread.
Parameters
----------
context : :class:`zmq.Context`
The ZMQ context to use.
session : :class:`session.Session`
The session to use.
address : zmq url
Standard (ip, port) tuple that the kernel is listening on.
"""
super(HBChannel, self).__init__()
self.daemon = True
self.context = context
self.session = session
if isinstance(address, tuple):
if address[1] == 0:
message = 'The port number for a channel cannot be 0.'
raise InvalidPortNumber(message)
address = "tcp://%s:%i" % address
self.address = address
atexit.register(self._notice_exit)
# running is False until `.start()` is called
self._running = False
# don't start paused
self._pause = False
self.poller = zmq.Poller()
def _notice_exit(self):
self._exiting = True
def _create_socket(self):
if self.socket is not None:
# close previous socket, before opening a new one
self.poller.unregister(self.socket)
self.socket.close()
self.socket = self.context.socket(zmq.REQ)
self.socket.linger = 1000
self.socket.connect(self.address)
self.poller.register(self.socket, zmq.POLLIN)
def _poll(self, start_time):
"""poll for heartbeat replies until we reach self.time_to_dead.
Ignores interrupts, and returns the result of poll(), which
will be an empty list if no messages arrived before the timeout,
or the event tuple if there is a message to receive.
"""
until_dead = self.time_to_dead - (time.time() - start_time)
# ensure poll at least once
until_dead = max(until_dead, 1e-3)
events = []
while True:
try:
events = self.poller.poll(1000 * until_dead)
except ZMQError as e:
if e.errno == errno.EINTR:
# ignore interrupts during heartbeat
# this may never actually happen
until_dead = self.time_to_dead - (time.time() - start_time)
until_dead = max(until_dead, 1e-3)
pass
else:
raise
except Exception:
if self._exiting:
break
else:
raise
else:
break
return events
def run(self):
"""The thread's main activity. Call start() instead."""
self._create_socket()
self._running = True
self._beating = True
while self._running:
if self._pause:
# just sleep, and skip the rest of the loop
time.sleep(self.time_to_dead)
continue
since_last_heartbeat = 0.0
# io.rprint('Ping from HB channel') # dbg
# no need to catch EFSM here, because the previous event was
# either a recv or connect, which cannot be followed by EFSM
self.socket.send(b'ping')
request_time = time.time()
ready = self._poll(request_time)
if ready:
self._beating = True
# the poll above guarantees we have something to recv
self.socket.recv()
# sleep the remainder of the cycle
remainder = self.time_to_dead - (time.time() - request_time)
if remainder > 0:
time.sleep(remainder)
continue
else:
# nothing was received within the time limit, signal heart failure
self._beating = False
since_last_heartbeat = time.time() - request_time
self.call_handlers(since_last_heartbeat)
# and close/reopen the socket, because the REQ/REP cycle has been broken
self._create_socket()
continue
def pause(self):
"""Pause the heartbeat."""
self._pause = True
def unpause(self):
"""Unpause the heartbeat."""
self._pause = False
def is_beating(self):
"""Is the heartbeat running and responsive (and not paused)."""
if self.is_alive() and not self._pause and self._beating:
return True
else:
return False
def stop(self):
"""Stop the channel's event loop and join its thread."""
self._running = False
self.join()
self.close()
def close(self):
if self.socket is not None:
try:
self.socket.close(linger=0)
except Exception:
pass
self.socket = None
def call_handlers(self, since_last_heartbeat):
"""This method is called in the ioloop thread when a message arrives.
Subclasses should override this method to handle incoming messages.
It is important to remember that this method is called in the thread
so that some logic must be done to ensure that the application level
handlers are called in the application thread.
"""
pass
HBChannelABC.register(HBChannel)
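# Minimal usage sketch (editor's addition; the address is hypothetical and
# normally comes from a kernel connection file):
#   import zmq
#   from jupyter_client.session import Session
#   hb = HBChannel(zmq.Context(), Session(), ('127.0.0.1', 54321))
#   hb.start()               # worker thread pings the kernel every time_to_dead
#   alive = hb.is_beating()  # True while the kernel answers pings
#   hb.stop()                # ends the loop and closes the socket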
| mit |
Diapolo/bitcoin | qa/rpc-tests/disablewallet.py | 102 | 1820 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Exercise API with -disablewallet.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class DisableWalletTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-disablewallet']])
self.is_network_split = False
self.sync_all()
def run_test (self):
# Check regression: https://github.com/bitcoin/bitcoin/issues/6963#issuecomment-154548880
x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
assert(x['isvalid'] == False)
x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
assert(x['isvalid'] == True)
# Checking mining to an address without a wallet
try:
self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
except JSONRPCException as e:
assert("Invalid address" not in e.error['message'])
assert("ProcessNewBlock, block not accepted" not in e.error['message'])
assert("Couldn't create new block" not in e.error['message'])
try:
self.nodes[0].generatetoaddress(1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
raise AssertionError("Must not mine to invalid address!")
except JSONRPCException as e:
assert("Invalid address" in e.error['message'])
if __name__ == '__main__':
DisableWalletTest ().main ()
| mit |
poppogbr/genropy | legacy_packages/dev/main.py | 1 | 1554 | #!/usr/bin/env python
# encoding: utf-8
import sys
import os
class Package(object):
def config_attributes(self):
return dict(comment='dev package',
name_short='dev', name_long='dev', name_full='dev')
def config_db(self, pkg):
#developer table
t = pkg.table('developer', pkey='id')
t.column('id', size='22')
t.column('shortname', size='12')
t.column('name', size='30', validate_case='capitalize')
t.column('password', size='12', input_mode='hidden')
t.column('tags')
#customer table
t = pkg.table('customer', pkey='id')
t.column('id', size='22')
t.column('shortname', size='12')
t.column('name', size='50')
t.column('password', size='12')
t.column('tags')
#package table
t = pkg.table('package', pkey='id')
t.column('id', size='6')
t.column('description', size='40')
t.column('customer_id', size='22').relation('dev.customer.id')
#role table
t = pkg.table('role', pkey='code')
t.column('code', size='10')
t.column('description', size='30')
#package-developer table
t = pkg.table('package_developer')
t.column('id', size='22', pkey='id')
t.column('package_id', size='6').relation('dev.package.id')
t.column('developer_id', size='22').relation('dev.developer.id')
t.column('role', size='10').relation('dev.role.code')
| lgpl-2.1 |
kirca/odoo | openerp/tools/misc.py | 9 | 42537 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cProfile
import subprocess
import logging
import os
import socket
import sys
import threading
import time
import zipfile
from collections import defaultdict, Mapping
from datetime import datetime
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
import traceback
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
import openerp
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode # noqa
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase)
def find_in_path(name):
try:
return which(name)
except IOError:
return None
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
return None
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
args2 = (prog,) + args
with open(os.devnull) as dn:
return subprocess.call(args2, stdout=dn, stderr=subprocess.STDOUT)
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
pop = subprocess.Popen((prog,) + args, bufsize= -1,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=(os.name=="posix"))
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
pop = subprocess.Popen((prog,) + args, bufsize= -1,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=(os.name=="posix"))
return pop.stdin, pop.stdout
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
        >>> file_open('hr/report/timesheet.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
>>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
import openerp.modules as addons
adps = addons.module.ad_paths
rtp = os.path.normcase(os.path.abspath(config['root_path']))
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
name = os.path.normpath(os.path.join(basedir, path))
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
fo = open(name, mode)
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
from cStringIO import StringIO
zfile = zipfile.ZipFile(zpath)
try:
fo = StringIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
raise IOError('Report %r doesn\'t exist or deleted' % basename)
raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
    """Flatten a nested list of elements into a single flat list
Author: Christophe Simonis ([email protected])
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def isiterable(x):
return hasattr(x, "__iter__")
r = []
for e in list:
if isiterable(e):
map(r.append, flatten(e))
else:
r.append(e)
return r
def reverse_enumerate(l):
    """Like enumerate but in reverse order
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return izip(xrange(len(l)-1, -1, -1), reversed(l))
class UpdateableStr(local):
""" Class that stores an updateable string (used in wizards)
"""
def __init__(self, string=''):
self.string = string
def __str__(self):
return str(self.string)
def __repr__(self):
return str(self.string)
def __nonzero__(self):
return bool(self.string)
class UpdateableDict(local):
"""Stores an updateable dict to use in wizards
"""
def __init__(self, dict=None):
if dict is None:
dict = {}
self.dict = dict
def __str__(self):
return str(self.dict)
def __repr__(self):
return str(self.dict)
def clear(self):
return self.dict.clear()
def keys(self):
return self.dict.keys()
def __setitem__(self, i, y):
self.dict.__setitem__(i, y)
def __getitem__(self, i):
return self.dict.__getitem__(i)
def copy(self):
return self.dict.copy()
def iteritems(self):
return self.dict.iteritems()
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return self.dict.itervalues()
def pop(self, k, d=None):
return self.dict.pop(k, d)
def popitem(self):
return self.dict.popitem()
def setdefault(self, k, d=None):
return self.dict.setdefault(k, d)
def update(self, E, **F):
        return self.dict.update(E, **F)
def values(self):
return self.dict.values()
def get(self, k, d=None):
return self.dict.get(k, d)
def has_key(self, k):
return self.dict.has_key(k)
def items(self):
return self.dict.items()
def __cmp__(self, y):
return self.dict.__cmp__(y)
def __contains__(self, k):
return self.dict.__contains__(k)
def __delitem__(self, y):
return self.dict.__delitem__(y)
def __eq__(self, y):
return self.dict.__eq__(y)
def __ge__(self, y):
return self.dict.__ge__(y)
def __gt__(self, y):
return self.dict.__gt__(y)
def __hash__(self):
return self.dict.__hash__()
def __iter__(self):
return self.dict.__iter__()
def __le__(self, y):
return self.dict.__le__(y)
def __len__(self):
return self.dict.__len__()
def __lt__(self, y):
return self.dict.__lt__(y)
def __ne__(self, y):
return self.dict.__ne__(y)
class currency(float):
""" Deprecate
.. warning::
Don't use ! Use res.currency.round()
"""
def __init__(self, value, accuracy=2, rounding=None):
if rounding is None:
rounding=10**-accuracy
self.rounding=rounding
self.accuracy=accuracy
def __new__(cls, value, accuracy=2, rounding=None):
return float.__new__(cls, round(value, accuracy))
#def __str__(self):
# display_value = int(self*(10**(-self.accuracy))/self.rounding)*self.rounding/(10**(-self.accuracy))
# return str(display_value)
def to_xml(s):
return s.replace('&','&').replace('<','<').replace('>','>')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
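# Example behaviour (editor's note): get_iso_codes('fr_FR') -> 'fr' because
# the territory merely repeats the language code, while get_iso_codes('fr_BE')
# stays 'fr_BE' and codes without '_' such as 'sr@latin' pass through as-is.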
ALL_LANGUAGES = {
'ab_RU': u'Abkhazian / аҧсуа',
'am_ET': u'Amharic / አምሃርኛ',
'ar_SY': u'Arabic / الْعَرَبيّة',
'bg_BG': u'Bulgarian / български език',
'bs_BS': u'Bosnian / bosanski jezik',
'ca_ES': u'Catalan / Català',
'cs_CZ': u'Czech / Čeština',
'da_DK': u'Danish / Dansk',
'de_DE': u'German / Deutsch',
'el_GR': u'Greek / Ελληνικά',
'en_CA': u'English (CA)',
'en_GB': u'English (UK)',
'en_US': u'English (US)',
'es_AR': u'Spanish (AR) / Español (AR)',
'es_BO': u'Spanish (BO) / Español (BO)',
'es_CL': u'Spanish (CL) / Español (CL)',
'es_CO': u'Spanish (CO) / Español (CO)',
'es_CR': u'Spanish (CR) / Español (CR)',
'es_DO': u'Spanish (DO) / Español (DO)',
'es_EC': u'Spanish (EC) / Español (EC)',
'es_ES': u'Spanish / Español',
'es_GT': u'Spanish (GT) / Español (GT)',
'es_HN': u'Spanish (HN) / Español (HN)',
'es_MX': u'Spanish (MX) / Español (MX)',
'es_NI': u'Spanish (NI) / Español (NI)',
'es_PA': u'Spanish (PA) / Español (PA)',
'es_PE': u'Spanish (PE) / Español (PE)',
'es_PR': u'Spanish (PR) / Español (PR)',
'es_PY': u'Spanish (PY) / Español (PY)',
'es_SV': u'Spanish (SV) / Español (SV)',
'es_UY': u'Spanish (UY) / Español (UY)',
'es_VE': u'Spanish (VE) / Español (VE)',
'et_EE': u'Estonian / Eesti keel',
'fa_IR': u'Persian / فارس',
'fi_FI': u'Finnish / Suomi',
'fr_BE': u'French (BE) / Français (BE)',
'fr_CA': u'French (CA) / Français (CA)',
'fr_CH': u'French (CH) / Français (CH)',
'fr_FR': u'French / Français',
'gl_ES': u'Galician / Galego',
'gu_IN': u'Gujarati / ગુજરાતી',
'he_IL': u'Hebrew / עִבְרִי',
'hi_IN': u'Hindi / हिंदी',
'hr_HR': u'Croatian / hrvatski jezik',
'hu_HU': u'Hungarian / Magyar',
'id_ID': u'Indonesian / Bahasa Indonesia',
'it_IT': u'Italian / Italiano',
'iu_CA': u'Inuktitut / ᐃᓄᒃᑎᑐᑦ',
'ja_JP': u'Japanese / 日本語',
'ko_KP': u'Korean (KP) / 한국어 (KP)',
'ko_KR': u'Korean (KR) / 한국어 (KR)',
'lo_LA': u'Lao / ພາສາລາວ',
'lt_LT': u'Lithuanian / Lietuvių kalba',
'lv_LV': u'Latvian / latviešu valoda',
'ml_IN': u'Malayalam / മലയാളം',
'mn_MN': u'Mongolian / монгол',
'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
'nl_NL': u'Dutch / Nederlands',
'nl_BE': u'Flemish (BE) / Vlaams (BE)',
'oc_FR': u'Occitan (FR, post 1500) / Occitan',
'pl_PL': u'Polish / Język polski',
'pt_BR': u'Portuguese (BR) / Português (BR)',
'pt_PT': u'Portuguese / Português',
'ro_RO': u'Romanian / română',
'ru_RU': u'Russian / русский язык',
'si_LK': u'Sinhalese / සිංහල',
'sl_SI': u'Slovenian / slovenščina',
'sk_SK': u'Slovak / Slovenský jazyk',
'sq_AL': u'Albanian / Shqip',
'sr_RS': u'Serbian (Cyrillic) / српски',
'sr@latin': u'Serbian (Latin) / srpski',
'sv_SE': u'Swedish / svenska',
'te_IN': u'Telugu / తెలుగు',
'tr_TR': u'Turkish / Türkçe',
'vi_VN': u'Vietnamese / Tiếng Việt',
'uk_UA': u'Ukrainian / українська',
'ur_PK': u'Urdu / اردو',
'zh_CN': u'Chinese (CN) / 简体中文',
'zh_HK': u'Chinese (HK)',
'zh_TW': u'Chinese (TW) / 正體字',
'th_TH': u'Thai / ภาษาไทย',
'tlh_TLH': u'Klingon',
}
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1])
def get_user_companies(cr, user):
def _get_company_children(cr, ids):
if not ids:
return []
cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
res = [x[0] for x in cr.fetchall()]
res.extend(_get_company_children(cr, res))
return res
cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
user_comp = cr.fetchone()[0]
if not user_comp:
return []
return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
"""
    Input : account or invoice number
    Output: the same number completed with the recursive mod10 check key
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
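# Worked example (editor's note, traced by hand through the codec table):
#   mod10r('12') -> '121'  # report: codec[1]=9, then codec[(2+9)%10]=9; key=(10-9)%10=1
#   mod10r('A1') -> 'A11'  # non-digits are copied through without updating the report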
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb')
if isinstance(sz,basestring):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
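# Examples (editor's note): human_size(1536) -> '1.50 Kb'; a string argument
# is measured by its length, so human_size('abcd') -> '4.00 bytes'; falsy
# input such as 0 or '' returns False.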
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
return result
return wrapper
__icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD',
'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER',
'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE',
'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO',
'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT',
'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE',
'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM',
'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK',
'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK',
'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC',
'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL',
'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD',
'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY',
'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND',
'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW',
'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES',
'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT',
'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED',
'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT',
'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK',
'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE',
'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100',
'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT',
'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase',
'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner',
'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test',
'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+',
'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver',
'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl',
'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus',
'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar',
'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow',
'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward',
'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific',
'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete'
]
def icons(*a, **kw):
global __icons_list
return [(x, x) for x in __icons_list ]
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
for binding to an interface, but it could be used as basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
for ifname in [iface for iface in ifaces if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
# RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT:
# The server side never does any timestamp calculation, always
# sends them in a naive (timezone agnostic) format supposed to be
# expressed within the server timezone, and expects the clients to
# provide timestamps in the server timezone as well.
# It stores all timestamps in the database in naive format as well,
# which also expresses the time in the server timezone.
# For this reason the server makes its timezone name available via the
# common/timezone_get() rpc method, which clients need to read
# to know the appropriate time offset to use when reading/writing
# times.
def get_win32_timezone():
"""Attempt to return the "standard name" of the current timezone on a win32 system.
@return the standard name of the current win32 timezone, or False if it cannot be found.
"""
res = False
if sys.platform == "win32":
try:
import _winreg
hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
current_tz_key = _winreg.OpenKey(hklm, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0,_winreg.KEY_ALL_ACCESS)
res = str(_winreg.QueryValueEx(current_tz_key,"StandardName")[0]) # [0] is value, [1] is type code
_winreg.CloseKey(current_tz_key)
_winreg.CloseKey(hklm)
except Exception:
pass
return res
def detect_server_timezone():
"""Attempt to detect the timezone to use on the server side.
Defaults to UTC if no working timezone can be found.
@return the timezone identifier as expected by pytz.timezone.
"""
try:
import pytz
except Exception:
_logger.warning("Python pytz module is not available. "
"Timezone will be set to UTC by default.")
return 'UTC'
# Option 1: the configuration option (did not exist before, so no backwards compatibility issue)
# Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz
# Option 3: the environment variable TZ
sources = [ (config['timezone'], 'OpenERP configuration'),
(time.tzname[0], 'time.tzname'),
(os.environ.get('TZ',False),'TZ environment variable'), ]
# Option 4: OS-specific: /etc/timezone on Unix
if os.path.exists("/etc/timezone"):
        tz_value = False
        f = None
        try:
            f = open("/etc/timezone")
            tz_value = f.read(128).strip()
        except Exception:
            pass
        finally:
            # `f` stays None if open() itself raised, so guard the close()
            if f is not None:
                f.close()
sources.append((tz_value,"/etc/timezone file"))
# Option 5: timezone info from registry on Win32
if sys.platform == "win32":
# Timezone info is stored in windows registry.
# However this is not likely to work very well as the standard name
# of timezones in windows is rarely something that is known to pytz.
# But that's ok, it is always possible to use a config option to set
# it explicitly.
sources.append((get_win32_timezone(),"Windows Registry"))
for (value,source) in sources:
if value:
try:
tz = pytz.timezone(value)
_logger.info("Using timezone %s obtained from %s.", tz.zone, source)
return value
except pytz.UnknownTimeZoneError:
_logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
_logger.warning("No valid timezone could be detected, using default UTC "
"timezone. You can specify it explicitly with option 'timezone' in "
"the server configuration.")
return 'UTC'
def get_server_timezone():
return "UTC"
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
# there is not tzinfo in a datetime value (e.g when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
POSIX_TO_LDML = {
'a': 'E',
'A': 'EEEE',
'b': 'MMM',
'B': 'MMMM',
#'c': '',
'd': 'dd',
'H': 'HH',
'I': 'hh',
'j': 'DDD',
'm': 'MM',
'M': 'mm',
'p': 'a',
'S': 'ss',
'U': 'w',
'w': 'e',
'W': 'w',
'y': 'yy',
'Y': 'yyyy',
# see comments above, and babel's format_datetime assumes an UTC timezone
# for naive datetime objects
#'z': 'Z',
#'Z': 'z',
}
def posix_to_ldml(fmt, locale):
""" Converts a posix/strftime pattern into an LDML date format pattern.
:param fmt: non-extended C89/C90 strftime pattern
:param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
:return: unicode
"""
buf = []
pc = False
quoted = []
for c in fmt:
# LDML date format patterns uses letters, so letters must be quoted
if not pc and c.isalpha():
quoted.append(c if c != "'" else "''")
continue
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
quoted = []
if pc:
if c == '%': # escaped percent
buf.append('%')
elif c == 'x': # date format, short seems to match
buf.append(locale.date_formats['short'].pattern)
elif c == 'X': # time format, seems to include seconds. short does not
buf.append(locale.time_formats['medium'].pattern)
else: # look up format char in static mapping
buf.append(POSIX_TO_LDML[c])
pc = False
elif c == '%':
pc = True
else:
buf.append(c)
# flush anything remaining in quoted buffer
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
return ''.join(buf)
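# Example (editor's sketch; assumes babel is available, as the %x/%X branches
# above expect a babel Locale):
#   from babel import Locale
#   posix_to_ldml('%Y-%m-%d %H:%M:%S', locale=Locale.parse('en_US'))
#   # -> 'yyyy-MM-dd HH:mm:ss'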
def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name,
tz_offset=True, ignore_unparsable_time=True):
"""
Convert a source timestamp string into a destination timestamp string, attempting to apply the
correct offset if both the server and local timezone are recognized, or no
offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ).
WARNING: This method is here to allow formatting dates correctly for inclusion in strings where
the client would not be able to format/offset it correctly. DO NOT use it for returning
date fields directly, these are supposed to be handled by the client!!
@param src_tstamp_str: the str value containing the timestamp in the server timezone.
@param src_format: the format to use when parsing the server timestamp.
@param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone.
@param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context)
@param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed
using src_format or formatted using dst_format.
@return local/client formatted timestamp, expressed in the local/client timezone if possible
and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined.
"""
if not src_tstamp_str:
return False
res = src_tstamp_str
if src_format and dst_format:
# find out server timezone
server_tz = get_server_timezone()
try:
# dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!)
dt_value = datetime.strptime(src_tstamp_str, src_format)
if tz_offset and dst_tz_name:
try:
import pytz
src_tz = pytz.timezone(server_tz)
dst_tz = pytz.timezone(dst_tz_name)
src_dt = src_tz.localize(dt_value, is_dst=True)
dt_value = src_dt.astimezone(dst_tz)
except Exception:
pass
res = dt_value.strftime(dst_format)
except Exception:
# Normal ways to end up here are if strptime or strftime failed
if not ignore_unparsable_time:
return False
return res
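# Usage sketch (editor's addition, illustrative values; assumes pytz is
# installed so the timezone shift is actually applied):
#   server_to_local_timestamp('2014-01-01 12:00:00',
#                             DEFAULT_SERVER_DATETIME_FORMAT,
#                             DEFAULT_SERVER_DATETIME_FORMAT,
#                             'Europe/Brussels')
#   # -> '2014-01-01 13:00:00' (naive UTC server time shifted to client tz)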
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
@param ``piece_maker``: function to build the pieces
from the slices (tuple,list,...)
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
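# Example (editor's note): list(split_every(3, range(7))) yields
# [(0, 1, 2), (3, 4, 5), (6,)]; pass piece_maker=list to get lists instead.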
if __name__ == '__main__':
import doctest
doctest.testmod()
class upload_data_thread(threading.Thread):
def __init__(self, email, data, type):
self.args = [('email',email),('type',type),('data',data)]
super(upload_data_thread,self).__init__()
def run(self):
try:
import urllib
args = urllib.urlencode(self.args)
fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
fp.read()
fp.close()
except Exception:
pass
def upload_data(email, data, type='SURVEY'):
a = upload_data_thread(email, data, type)
a.start()
return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
    """ Read the values of ``field`` for the given ``ids`` and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
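# Example (editor's note): attrgetter('partner.name') resolves the dotted path
# one attribute at a time, and attrgetter('id', 'partner.name') returns a
# tuple, matching operator.attrgetter on Python >= 2.6.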
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
    or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
Some examples (notice that there are never quotes surrounding
    the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
>>> print d
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
    """Temporarily suppress logging.
Can be used as context manager or decorator.
@mute_logger('openerp.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('openerp.foo.bar'):
        do_stuff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
assert isinstance(logger, basestring),\
"A logger name must be a string, got %s" % type(logger)
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
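# Example (editor's note): the wrapper keeps counting even when a library
# drives the iteration internally:
#   s = CountingStream('ab')
#   next(s)   # -> 'a', s.index == 0
#   next(s)   # -> 'b', s.index == 1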
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-d', '--database', '-u', '--update', '-i', '--init']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
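# Example (editor's sketch, hypothetical argv): if the server was launched as
#   openerp-server -d prod --update=all --xmlrpc-port=8069
# then stripped_sys_argv() returns ['openerp-server', '--xmlrpc-port=8069'],
# i.e. the database/update/init/save options are removed before re-exec.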
class ConstantMapping(Mapping):
"""
An immutable mapping returning the provided value for every single key.
Useful for default value to methods
"""
__slots__ = ['_value']
def __init__(self, val):
self._value = val
def __len__(self):
"""
defaultdict updates its length for each individually requested key, is
that really useful?
"""
return 0
def __iter__(self):
"""
        same as len, defaultdict updates its iterable keyset with each key
requested, is there a point for this?
"""
return iter([])
def __getitem__(self, item):
return self._value
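# Example (editor's note): m = ConstantMapping(42); m['anything'] -> 42 while
# len(m) == 0 and list(m) == [], making it a cheap read-only default for
# dict-typed parameters.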
def dumpstacks(sig=None, frame=None):
""" Signal handler: dump a stack trace for each existing thread."""
code = []
def extract_stack(stack):
for filename, lineno, name, line in traceback.extract_stack(stack):
yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
if line:
yield " %s" % (line.strip(),)
# code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
# modified for python 2.5 compatibility
threads_info = dict([(th.ident, {'name': th.name, 'uid': getattr(th, 'uid', 'n/a')})
for th in threading.enumerate()])
for threadId, stack in sys._current_frames().items():
thread_info = threads_info.get(threadId)
code.append("\n# Thread: %s (id:%s) (uid:%s)" %
(thread_info and thread_info['name'] or 'n/a',
threadId,
thread_info and thread_info['uid'] or 'n/a'))
for line in extract_stack(stack):
code.append(line)
if openerp.evented:
# code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
import gc
from greenlet import greenlet
for ob in gc.get_objects():
if not isinstance(ob, greenlet) or not ob:
continue
code.append("\n# Greenlet: %r" % (ob,))
for line in extract_stack(ob.gr_frame):
code.append(line)
_logger.info("\n".join(code))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chachan/nodeshot | nodeshot/interop/sync/synchronizers/openwisp.py | 5 | 1809 | from __future__ import absolute_import
from django.contrib.gis.geos import Point
from .base import XMLParserMixin, GenericGisSynchronizer
class OpenWisp(XMLParserMixin, GenericGisSynchronizer):
""" OpenWisp GeoRSS synchronizer class """
def parse(self):
""" parse data """
super(OpenWisp, self).parse()
self.parsed_data = self.parsed_data.getElementsByTagName('item')
def parse_item(self, item):
guid = self.get_text(item, 'guid')
try:
name, created_at = guid.split('201', 1) # ugly hack
except ValueError:
name = guid.replace('_', '-')
created_at = None
name = name.replace('_', ' ')
description = self.get_text(item, 'title')
address = self.get_text(item, 'description')
updated_at = self.get_text(item, 'updated')
# ensure created_at and updated_at are dates
if created_at:
created_at = "201%s" % created_at
else:
created_at = None
if updated_at == '0':
updated_at = None
try:
lat, lng = self.get_text(item, 'georss:point').split(' ')
except IndexError:
# detail view
lat = self.get_text(item, 'georss:lat')
lng = self.get_text(item, 'georss:long')
# point object
geometry = Point(float(lng), float(lat))
result = {
"name": name,
"status": None,
"address": address,
"is_published": True,
"user": None,
"geometry": geometry,
"elev": None,
"description": description,
"notes": guid,
"added": created_at,
"updated": updated_at,
"data": {}
}
return result
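# Illustrative note (editor's addition, hypothetical guid): for a guid like
# 'my-node_2013-05-30', splitting on the literal '201' yields the name part
# 'my-node_' and the remainder '3-05-30'; parse_item then re-prefixes '201'
# to rebuild the '2013-05-30' creation date.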
| gpl-3.0 |
songmonit/CTTMSONLINE_V8 | addons/hr/__openerp__.py | 260 | 2372 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Employee Directory',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Human Resources',
'sequence': 21,
'summary': 'Jobs, Departments, Employees Details',
'description': """
Human Resources Management
==========================
This application enables you to manage important aspects of your company's staff and other details such as their skills, contacts, working time...
You can manage:
---------------
* Employees and hierarchies: define employees, link them to users, and display hierarchies
* HR Departments
* HR Jobs
""",
'website': 'https://www.odoo.com/page/employees',
'depends': ['base_setup','mail', 'resource', 'board'],
'data': [
'security/hr_security.xml',
'security/ir.model.access.csv',
'hr_view.xml',
'hr_installer.xml',
'hr_data.xml',
'res_config_view.xml',
'mail_hr_view.xml',
'res_users_view.xml',
'views/hr.xml',
],
'demo': ['hr_demo.xml'],
'test': [
'test/hr_users.yml',
'test/open2recruit2close_job.yml',
'test/hr_demo.yml',
],
'installable': True,
'application': True,
'auto_install': False,
'qweb': [ 'static/src/xml/suggestions.xml' ],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wndhydrnt/airflow | airflow/example_dags/example_http_operator.py | 3 | 2871 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
### Example HTTP operator and sensor
"""
import json
from datetime import timedelta
import airflow
from airflow import DAG
from airflow.operators.http_operator import SimpleHttpOperator
from airflow.sensors.http_sensor import HttpSensor
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(2),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
}
dag = DAG('example_http_operator', default_args=default_args)
dag.doc_md = __doc__
# t1, t2 and t3 are examples of tasks created by instantiating operators
t1 = SimpleHttpOperator(
task_id='post_op',
endpoint='api/v1.0/nodes',
data=json.dumps({"priority": 5}),
headers={"Content-Type": "application/json"},
    response_check=lambda response: len(response.json()) == 0,
dag=dag)
t5 = SimpleHttpOperator(
task_id='post_op_formenc',
endpoint='nodes/url',
data="name=Joe",
headers={"Content-Type": "application/x-www-form-urlencoded"},
dag=dag)
t2 = SimpleHttpOperator(
task_id='get_op',
method='GET',
endpoint='api/v1.0/nodes',
data={"param1": "value1", "param2": "value2"},
headers={},
dag=dag)
t3 = SimpleHttpOperator(
task_id='put_op',
method='PUT',
endpoint='api/v1.0/nodes',
data=json.dumps({"priority": 5}),
headers={"Content-Type": "application/json"},
dag=dag)
t4 = SimpleHttpOperator(
task_id='del_op',
method='DELETE',
endpoint='api/v1.0/nodes',
data="some=data",
headers={"Content-Type": "application/x-www-form-urlencoded"},
dag=dag)
sensor = HttpSensor(
task_id='http_sensor_check',
http_conn_id='http_default',
endpoint='',
request_params={},
    response_check=lambda response: "Google" in response.content,
poke_interval=5,
dag=dag)
t1.set_upstream(sensor)
t2.set_upstream(t1)
t3.set_upstream(t2)
t4.set_upstream(t3)
t5.set_upstream(t4)
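# A sketch of the same dependency chain written with the bitshift operators
# (assumed available in the Airflow release this example targets; behavior
# is identical to the set_upstream() calls above):
# sensor >> t1 >> t2 >> t3 >> t4 >> t5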
| apache-2.0 |
felixfontein/ansible | test/support/integration/plugins/modules/postgresql_db.py | 53 | 23381 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_db
short_description: Add or remove PostgreSQL databases from a remote host.
description:
- Add or remove PostgreSQL databases from a remote host.
version_added: '0.6'
options:
name:
description:
- Name of the database to add or remove
type: str
required: true
aliases: [ db ]
port:
description:
- Database port to connect (if needed)
type: int
default: 5432
aliases:
- login_port
owner:
description:
- Name of the role to set as owner of the database
type: str
template:
description:
- Template used to create the database
type: str
encoding:
description:
- Encoding of the database
type: str
lc_collate:
description:
- Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
type: str
lc_ctype:
description:
- Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
is used as template.
type: str
session_role:
description:
- Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
type: str
version_added: '2.8'
state:
description:
- The database state.
- C(present) implies that the database should be created if necessary.
- C(absent) implies that the database should be removed if present.
- C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
      Note that in some PostgreSQL versions pg_dump, an embedded PostgreSQL utility used by the module,
      returns rc 0 even when errors occur (e.g. when the connection is forbidden by pg_hba.conf, etc.),
      so the module reports changed=True even though the dump was not actually performed. Please make sure that your
      version of pg_dump returns rc 1 in this case.
- C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
- The format of the backup will be detected based on the target name.
- Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
- Supported formats for dump and restore include C(.sql) and C(.tar)
type: str
choices: [ absent, dump, present, restore ]
default: present
target:
description:
- File to back up or restore from.
- Used when I(state) is C(dump) or C(restore).
type: path
version_added: '2.4'
target_opts:
description:
- Further arguments for pg_dump or pg_restore.
- Used when I(state) is C(dump) or C(restore).
type: str
version_added: '2.4'
maintenance_db:
description:
    - The value specifies the initial database (also called the maintenance DB) that Ansible connects to.
type: str
default: postgres
version_added: '2.5'
conn_limit:
description:
- Specifies the database connection limit.
type: str
version_added: '2.8'
tablespace:
description:
- The tablespace to set for the database
U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
- If you want to move the database back to the default tablespace,
explicitly set this to pg_default.
type: path
version_added: '2.9'
dump_extra_args:
description:
- Provides additional arguments when I(state) is C(dump).
- Cannot be used with dump-file-format-related arguments like ``--format=d``.
type: str
version_added: '2.10'
seealso:
- name: CREATE DATABASE reference
description: Complete reference of the CREATE DATABASE command documentation.
link: https://www.postgresql.org/docs/current/sql-createdatabase.html
- name: DROP DATABASE reference
description: Complete reference of the DROP DATABASE command documentation.
link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
- name: pg_dump reference
description: Complete reference of pg_dump documentation.
link: https://www.postgresql.org/docs/current/app-pgdump.html
- name: pg_restore reference
description: Complete reference of pg_restore documentation.
link: https://www.postgresql.org/docs/current/app-pgrestore.html
- module: postgresql_tablespace
- module: postgresql_info
- module: postgresql_ping
notes:
- State C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
author: "Ansible Core Team"
extends_documentation_fragment:
- postgres
'''
EXAMPLES = r'''
- name: Create a new database with name "acme"
postgresql_db:
name: acme
# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
- name: Create a new database with name "acme" and specific encoding and locale settings
postgresql_db:
name: acme
encoding: UTF-8
lc_collate: de_DE.UTF-8
lc_ctype: de_DE.UTF-8
template: template0
# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited"
- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
postgresql_db:
name: acme
conn_limit: "100"
- name: Dump an existing database to a file
postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql
- name: Dump an existing database to a file excluding the test table
postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql
dump_extra_args: --exclude-table=test
- name: Dump an existing database to a file (with compression)
postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql.gz
- name: Dump a single schema for an existing database
postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql
target_opts: "-n public"
# Note: In the example below, if database foo exists and has another tablespace
# the tablespace will be changed to foo. Access to the database will be locked
# until the copying of database files is finished.
- name: Create a new database called foo in tablespace bar
postgresql_db:
name: foo
tablespace: bar
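# Note: The following restore example is an illustrative sketch (not part of
# the original examples); the target file extension selects the restore pipeline.
- name: Restore database "acme" from a compressed dump (hypothetical path)
  postgresql_db:
    name: acme
    state: restore
    target: /tmp/acme.sql.gz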
'''
RETURN = r'''
executed_commands:
description: List of commands which tried to run.
returned: always
type: list
sample: ["CREATE DATABASE acme"]
version_added: '2.10'
'''
import os
import subprocess
import traceback
try:
import psycopg2
import psycopg2.extras
except ImportError:
HAS_PSYCOPG2 = False
else:
HAS_PSYCOPG2 = True
import ansible.module_utils.postgres as pgutils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_native
executed_commands = []
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, db, owner):
query = 'ALTER DATABASE %s OWNER TO "%s"' % (
pg_quote_identifier(db, 'database'),
owner)
executed_commands.append(query)
cursor.execute(query)
return True
def set_conn_limit(cursor, db, conn_limit):
query = "ALTER DATABASE %s CONNECTION LIMIT %s" % (
pg_quote_identifier(db, 'database'),
conn_limit)
executed_commands.append(query)
cursor.execute(query)
return True
def get_encoding_id(cursor, encoding):
query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
cursor.execute(query, {'encoding': encoding})
return cursor.fetchone()['encoding_id']
def get_db_info(cursor, db):
query = """
SELECT rolname AS owner,
pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
spcname AS tablespace
FROM pg_database
JOIN pg_roles ON pg_roles.oid = pg_database.datdba
JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
WHERE datname = %(db)s
"""
cursor.execute(query, {'db': db})
return cursor.fetchone()
def db_exists(cursor, db):
query = "SELECT * FROM pg_database WHERE datname=%(db)s"
cursor.execute(query, {'db': db})
return cursor.rowcount == 1
def db_delete(cursor, db):
if db_exists(cursor, db):
query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database')
executed_commands.append(query)
cursor.execute(query)
return True
else:
return False
def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
if not db_exists(cursor, db):
query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')]
if owner:
query_fragments.append('OWNER "%s"' % owner)
if template:
query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database'))
if encoding:
query_fragments.append('ENCODING %(enc)s')
if lc_collate:
query_fragments.append('LC_COLLATE %(collate)s')
if lc_ctype:
query_fragments.append('LC_CTYPE %(ctype)s')
if tablespace:
query_fragments.append('TABLESPACE %s' % pg_quote_identifier(tablespace, 'tablespace'))
if conn_limit:
query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
query = ' '.join(query_fragments)
executed_commands.append(cursor.mogrify(query, params))
cursor.execute(query, params)
return True
else:
db_info = get_db_info(cursor, db)
if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
raise NotSupportedError(
'Changing database encoding is not supported. '
'Current encoding: %s' % db_info['encoding']
)
elif lc_collate and lc_collate != db_info['lc_collate']:
raise NotSupportedError(
'Changing LC_COLLATE is not supported. '
'Current LC_COLLATE: %s' % db_info['lc_collate']
)
elif lc_ctype and lc_ctype != db_info['lc_ctype']:
raise NotSupportedError(
                'Changing LC_CTYPE is not supported. '
'Current LC_CTYPE: %s' % db_info['lc_ctype']
)
else:
changed = False
if owner and owner != db_info['owner']:
changed = set_owner(cursor, db, owner)
if conn_limit and conn_limit != str(db_info['conn_limit']):
changed = set_conn_limit(cursor, db, conn_limit)
if tablespace and tablespace != db_info['tablespace']:
changed = set_tablespace(cursor, db, tablespace)
return changed
def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
if not db_exists(cursor, db):
return False
else:
db_info = get_db_info(cursor, db)
if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
return False
elif lc_collate and lc_collate != db_info['lc_collate']:
return False
elif lc_ctype and lc_ctype != db_info['lc_ctype']:
return False
elif owner and owner != db_info['owner']:
return False
elif conn_limit and conn_limit != str(db_info['conn_limit']):
return False
elif tablespace and tablespace != db_info['tablespace']:
return False
else:
return True
def db_dump(module, target, target_opts="",
db=None,
dump_extra_args=None,
user=None,
password=None,
host=None,
port=None,
**kw):
flags = login_flags(db, host, port, user, db_prefix=False)
cmd = module.get_bin_path('pg_dump', True)
comp_prog_path = None
if os.path.splitext(target)[-1] == '.tar':
flags.append(' --format=t')
elif os.path.splitext(target)[-1] == '.pgc':
flags.append(' --format=c')
if os.path.splitext(target)[-1] == '.gz':
if module.get_bin_path('pigz'):
comp_prog_path = module.get_bin_path('pigz', True)
else:
comp_prog_path = module.get_bin_path('gzip', True)
elif os.path.splitext(target)[-1] == '.bz2':
comp_prog_path = module.get_bin_path('bzip2', True)
elif os.path.splitext(target)[-1] == '.xz':
comp_prog_path = module.get_bin_path('xz', True)
cmd += "".join(flags)
if dump_extra_args:
cmd += " {0} ".format(dump_extra_args)
if target_opts:
cmd += " {0} ".format(target_opts)
if comp_prog_path:
# Use a fifo to be notified of an error in pg_dump
# Using shell pipe has no way to return the code of the first command
# in a portable way.
fifo = os.path.join(module.tmpdir, 'pg_fifo')
os.mkfifo(fifo)
cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
else:
cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
return do_with_password(module, cmd, password)
def db_restore(module, target, target_opts="",
db=None,
user=None,
password=None,
host=None,
port=None,
**kw):
flags = login_flags(db, host, port, user)
comp_prog_path = None
cmd = module.get_bin_path('psql', True)
if os.path.splitext(target)[-1] == '.sql':
flags.append(' --file={0}'.format(target))
elif os.path.splitext(target)[-1] == '.tar':
flags.append(' --format=Tar')
cmd = module.get_bin_path('pg_restore', True)
elif os.path.splitext(target)[-1] == '.pgc':
flags.append(' --format=Custom')
cmd = module.get_bin_path('pg_restore', True)
elif os.path.splitext(target)[-1] == '.gz':
comp_prog_path = module.get_bin_path('zcat', True)
elif os.path.splitext(target)[-1] == '.bz2':
comp_prog_path = module.get_bin_path('bzcat', True)
elif os.path.splitext(target)[-1] == '.xz':
comp_prog_path = module.get_bin_path('xzcat', True)
cmd += "".join(flags)
if target_opts:
cmd += " {0} ".format(target_opts)
if comp_prog_path:
env = os.environ.copy()
if password:
env = {"PGPASSWORD": password}
p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
(stdout2, stderr2) = p2.communicate()
p1.stdout.close()
p1.wait()
if p1.returncode != 0:
stderr1 = p1.stderr.read()
return p1.returncode, '', stderr1, 'cmd: ****'
else:
return p2.returncode, '', stderr2, 'cmd: ****'
else:
cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
return do_with_password(module, cmd, password)
def login_flags(db, host, port, user, db_prefix=True):
"""
returns a list of connection argument strings each prefixed
with a space and quoted where necessary to later be combined
    in a single shell string with `"".join(flags)`
db_prefix determines if "--dbname" is prefixed to the db argument,
since the argument was introduced in 9.3.
"""
flags = []
if db:
if db_prefix:
flags.append(' --dbname={0}'.format(shlex_quote(db)))
else:
flags.append(' {0}'.format(shlex_quote(db)))
if host:
flags.append(' --host={0}'.format(host))
if port:
flags.append(' --port={0}'.format(port))
if user:
flags.append(' --username={0}'.format(user))
return flags
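# Illustrative (hypothetical) call, not part of the original module:
#   login_flags('acme', 'localhost', 5432, 'postgres')
#   -> [' --dbname=acme', ' --host=localhost', ' --port=5432', ' --username=postgres']
# Callers then collapse the list into a single shell string with "".join(flags).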
def do_with_password(module, cmd, password):
env = {}
if password:
env = {"PGPASSWORD": password}
executed_commands.append(cmd)
    rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
    return rc, stdout, stderr, cmd
def set_tablespace(cursor, db, tablespace):
query = "ALTER DATABASE %s SET TABLESPACE %s" % (
pg_quote_identifier(db, 'database'),
pg_quote_identifier(tablespace, 'tablespace'))
executed_commands.append(query)
cursor.execute(query)
return True
# ===========================================
# Module execution.
#
def main():
argument_spec = pgutils.postgres_common_argument_spec()
argument_spec.update(
db=dict(type='str', required=True, aliases=['name']),
owner=dict(type='str', default=''),
template=dict(type='str', default=''),
encoding=dict(type='str', default=''),
lc_collate=dict(type='str', default=''),
lc_ctype=dict(type='str', default=''),
state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
target=dict(type='path', default=''),
target_opts=dict(type='str', default=''),
maintenance_db=dict(type='str', default="postgres"),
session_role=dict(type='str'),
conn_limit=dict(type='str', default=''),
tablespace=dict(type='path', default=''),
dump_extra_args=dict(type='str', default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
db = module.params["db"]
owner = module.params["owner"]
template = module.params["template"]
encoding = module.params["encoding"]
lc_collate = module.params["lc_collate"]
lc_ctype = module.params["lc_ctype"]
target = module.params["target"]
target_opts = module.params["target_opts"]
state = module.params["state"]
changed = False
maintenance_db = module.params['maintenance_db']
session_role = module.params["session_role"]
conn_limit = module.params['conn_limit']
tablespace = module.params['tablespace']
dump_extra_args = module.params['dump_extra_args']
raw_connection = state in ("dump", "restore")
if not raw_connection:
pgutils.ensure_required_libs(module)
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host": "host",
"login_user": "user",
"login_password": "password",
"port": "port",
"ssl_mode": "sslmode",
"ca_cert": "sslrootcert"
}
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != '' and v is not None)
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
if target == "":
target = "{0}/{1}.sql".format(os.getcwd(), db)
target = os.path.expanduser(target)
if not raw_connection:
try:
db_connection = psycopg2.connect(database=maintenance_db, **kw)
# Enable autocommit so we can create databases
if psycopg2.__version__ >= '2.4.2':
db_connection.autocommit = True
else:
db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
except TypeError as e:
if 'sslrootcert' in e.args[0]:
module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
exception=traceback.format_exc())
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
if session_role:
try:
cursor.execute('SET ROLE "%s"' % session_role)
except Exception as e:
module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
try:
if module.check_mode:
if state == "absent":
changed = db_exists(cursor, db)
elif state == "present":
changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
if state == "absent":
try:
changed = db_delete(cursor, db)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state == "present":
try:
changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state in ("dump", "restore"):
            method = db_dump if state == "dump" else db_restore
try:
if state == 'dump':
rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
else:
rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)
if rc != 0:
module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
else:
module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
executed_commands=executed_commands)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except NotSupportedError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except SystemExit:
# Avoid catching this on Python 2.4
raise
except Exception as e:
module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
if __name__ == '__main__':
main()
| gpl-3.0 |
titasakgm/brc-stock | openerp/addons/note_pad/note_pad.py | 441 | 1301 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
class note_pad_note(osv.osv):
""" memo pad """
_name = 'note.note'
_inherit = ['pad.common','note.note']
_pad_fields = ['note_pad']
_columns = {
'note_pad_url': fields.char('Pad Url', pad_content_field='memo'),
} | agpl-3.0 |
makermade/arm_android-19_arm-linux-androideabi-4.8 | lib/python2.7/mailbox.py | 64 | 80751 | #! /usr/bin/env python
"""Read/write support for Maildir, mbox, MH, Babyl, and MMDF mailboxes."""
# Notes for authors of new mailbox subclasses:
#
# Remember to fsync() changes to disk before closing a modified file
# or returning from a flush() method. See functions _sync_flush() and
# _sync_close().
import sys
import os
import time
import calendar
import socket
import errno
import copy
import email
import email.message
import email.generator
import StringIO
try:
if sys.platform == 'os2emx':
# OS/2 EMX fcntl() not adequate
raise ImportError
import fcntl
except ImportError:
fcntl = None
import warnings
with warnings.catch_warnings():
if sys.py3kwarning:
warnings.filterwarnings("ignore", ".*rfc822 has been removed",
DeprecationWarning)
import rfc822
__all__ = [ 'Mailbox', 'Maildir', 'mbox', 'MH', 'Babyl', 'MMDF',
'Message', 'MaildirMessage', 'mboxMessage', 'MHMessage',
'BabylMessage', 'MMDFMessage', 'UnixMailbox',
'PortableUnixMailbox', 'MmdfMailbox', 'MHMailbox', 'BabylMailbox' ]
class Mailbox:
"""A group of messages in a particular place."""
def __init__(self, path, factory=None, create=True):
"""Initialize a Mailbox instance."""
self._path = os.path.abspath(os.path.expanduser(path))
self._factory = factory
def add(self, message):
"""Add message and return assigned key."""
raise NotImplementedError('Method must be implemented by subclass')
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def __delitem__(self, key):
self.remove(key)
def discard(self, key):
"""If the keyed message exists, remove it."""
try:
self.remove(key)
except KeyError:
pass
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def get(self, key, default=None):
"""Return the keyed message, or default if it doesn't exist."""
try:
return self.__getitem__(key)
except KeyError:
return default
def __getitem__(self, key):
"""Return the keyed message; raise KeyError if it doesn't exist."""
if not self._factory:
return self.get_message(key)
else:
return self._factory(self.get_file(key))
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def iterkeys(self):
"""Return an iterator over keys."""
raise NotImplementedError('Method must be implemented by subclass')
def keys(self):
"""Return a list of keys."""
return list(self.iterkeys())
def itervalues(self):
"""Return an iterator over all messages."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield value
def __iter__(self):
return self.itervalues()
def values(self):
"""Return a list of messages. Memory intensive."""
return list(self.itervalues())
def iteritems(self):
"""Return an iterator over (key, message) tuples."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield (key, value)
def items(self):
"""Return a list of (key, message) tuples. Memory intensive."""
return list(self.iteritems())
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
raise NotImplementedError('Method must be implemented by subclass')
def __contains__(self, key):
return self.has_key(key)
def __len__(self):
"""Return a count of messages in the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def clear(self):
"""Delete all messages."""
for key in self.iterkeys():
self.discard(key)
def pop(self, key, default=None):
"""Delete the keyed message and return it, or default."""
try:
result = self[key]
except KeyError:
return default
self.discard(key)
return result
def popitem(self):
"""Delete an arbitrary (key, message) pair and return it."""
for key in self.iterkeys():
return (key, self.pop(key)) # This is only run once.
else:
raise KeyError('No messages in mailbox')
def update(self, arg=None):
"""Change the messages that correspond to certain keys."""
if hasattr(arg, 'iteritems'):
source = arg.iteritems()
elif hasattr(arg, 'items'):
source = arg.items()
else:
source = arg
bad_key = False
for key, message in source:
try:
self[key] = message
except KeyError:
bad_key = True
if bad_key:
raise KeyError('No message with key(s)')
def flush(self):
"""Write any pending changes to the disk."""
raise NotImplementedError('Method must be implemented by subclass')
def lock(self):
"""Lock the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def unlock(self):
"""Unlock the mailbox if it is locked."""
raise NotImplementedError('Method must be implemented by subclass')
def close(self):
"""Flush and close the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
# Whether each message must end in a newline
_append_newline = False
def _dump_message(self, message, target, mangle_from_=False):
# Most files are opened in binary mode to allow predictable seeking.
# To get native line endings on disk, the user-friendly \n line endings
# used in strings and by email.Message are translated here.
"""Dump message contents to target file."""
if isinstance(message, email.message.Message):
buffer = StringIO.StringIO()
gen = email.generator.Generator(buffer, mangle_from_, 0)
gen.flatten(message)
buffer.seek(0)
data = buffer.read().replace('\n', os.linesep)
target.write(data)
if self._append_newline and not data.endswith(os.linesep):
# Make sure the message ends with a newline
target.write(os.linesep)
elif isinstance(message, str):
if mangle_from_:
message = message.replace('\nFrom ', '\n>From ')
message = message.replace('\n', os.linesep)
target.write(message)
if self._append_newline and not message.endswith(os.linesep):
# Make sure the message ends with a newline
target.write(os.linesep)
elif hasattr(message, 'read'):
lastline = None
while True:
line = message.readline()
if line == '':
break
if mangle_from_ and line.startswith('From '):
line = '>From ' + line[5:]
line = line.replace('\n', os.linesep)
target.write(line)
lastline = line
if self._append_newline and lastline and not lastline.endswith(os.linesep):
# Make sure the message ends with a newline
target.write(os.linesep)
else:
raise TypeError('Invalid message type: %s' % type(message))
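# Illustrative sketch of the dict-like Mailbox API (hypothetical path and
# message; not part of the original module):
#
#     import mailbox
#     mb = mailbox.mbox('/tmp/inbox.mbox')
#     key = mb.add('From: [email protected]\n\nhello\n')
#     for k, msg in mb.iteritems():
#         print k, msg['from']
#     mb.flush()
#     mb.close()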
class Maildir(Mailbox):
"""A qmail-style Maildir mailbox."""
colon = ':'
def __init__(self, dirname, factory=rfc822.Message, create=True):
"""Initialize a Maildir instance."""
Mailbox.__init__(self, dirname, factory, create)
self._paths = {
'tmp': os.path.join(self._path, 'tmp'),
'new': os.path.join(self._path, 'new'),
'cur': os.path.join(self._path, 'cur'),
}
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0700)
for path in self._paths.values():
                    os.mkdir(path, 0700)
else:
raise NoSuchMailboxError(self._path)
self._toc = {}
self._toc_mtimes = {'cur': 0, 'new': 0}
self._last_read = 0 # Records last time we read cur/new
self._skewfactor = 0.1 # Adjust if os/fs clocks are skewing
def add(self, message):
"""Add message and return assigned key."""
tmp_file = self._create_tmp()
try:
self._dump_message(message, tmp_file)
except BaseException:
tmp_file.close()
os.remove(tmp_file.name)
raise
_sync_close(tmp_file)
if isinstance(message, MaildirMessage):
subdir = message.get_subdir()
suffix = self.colon + message.get_info()
if suffix == self.colon:
suffix = ''
else:
subdir = 'new'
suffix = ''
uniq = os.path.basename(tmp_file.name).split(self.colon)[0]
dest = os.path.join(self._path, subdir, uniq + suffix)
try:
if hasattr(os, 'link'):
os.link(tmp_file.name, dest)
os.remove(tmp_file.name)
else:
os.rename(tmp_file.name, dest)
except OSError, e:
os.remove(tmp_file.name)
if e.errno == errno.EEXIST:
raise ExternalClashError('Name clash with existing message: %s'
% dest)
else:
raise
if isinstance(message, MaildirMessage):
os.utime(dest, (os.path.getatime(dest), message.get_date()))
return uniq
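    # For illustration (hypothetical values, not part of the original module):
    # a MaildirMessage with subdir 'cur' and info '2,S' is delivered to
    # "<maildir>/cur/<uniq>:2,S", while a plain message lands in
    # "<maildir>/new/<uniq>" with no info suffix.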
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
os.remove(os.path.join(self._path, self._lookup(key)))
def discard(self, key):
"""If the keyed message exists, remove it."""
# This overrides an inapplicable implementation in the superclass.
try:
self.remove(key)
except KeyError:
pass
except OSError, e:
if e.errno != errno.ENOENT:
raise
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
old_subpath = self._lookup(key)
temp_key = self.add(message)
temp_subpath = self._lookup(temp_key)
if isinstance(message, MaildirMessage):
# temp's subdir and suffix were specified by message.
dominant_subpath = temp_subpath
else:
# temp's subdir and suffix were defaults from add().
dominant_subpath = old_subpath
subdir = os.path.dirname(dominant_subpath)
if self.colon in dominant_subpath:
suffix = self.colon + dominant_subpath.split(self.colon)[-1]
else:
suffix = ''
self.discard(key)
new_path = os.path.join(self._path, subdir, key + suffix)
os.rename(os.path.join(self._path, temp_subpath), new_path)
if isinstance(message, MaildirMessage):
os.utime(new_path, (os.path.getatime(new_path),
message.get_date()))
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
subpath = self._lookup(key)
f = open(os.path.join(self._path, subpath), 'r')
try:
if self._factory:
msg = self._factory(f)
else:
msg = MaildirMessage(f)
finally:
f.close()
subdir, name = os.path.split(subpath)
msg.set_subdir(subdir)
if self.colon in name:
msg.set_info(name.split(self.colon)[-1])
msg.set_date(os.path.getmtime(os.path.join(self._path, subpath)))
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
f = open(os.path.join(self._path, self._lookup(key)), 'r')
try:
return f.read()
finally:
f.close()
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
f = open(os.path.join(self._path, self._lookup(key)), 'rb')
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
self._refresh()
for key in self._toc:
try:
self._lookup(key)
except KeyError:
continue
yield key
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._refresh()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._refresh()
return len(self._toc)
def flush(self):
"""Write any pending changes to disk."""
# Maildir changes are always written immediately, so there's nothing
# to do.
pass
def lock(self):
"""Lock the mailbox."""
return
def unlock(self):
"""Unlock the mailbox if it is locked."""
return
def close(self):
"""Flush and close the mailbox."""
return
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if len(entry) > 1 and entry[0] == '.' and \
os.path.isdir(os.path.join(self._path, entry)):
result.append(entry[1:])
return result
def get_folder(self, folder):
"""Return a Maildir instance for the named folder."""
return Maildir(os.path.join(self._path, '.' + folder),
factory=self._factory,
create=False)
def add_folder(self, folder):
"""Create a folder and return a Maildir instance representing it."""
path = os.path.join(self._path, '.' + folder)
result = Maildir(path, factory=self._factory)
maildirfolder_path = os.path.join(path, 'maildirfolder')
if not os.path.exists(maildirfolder_path):
os.close(os.open(maildirfolder_path, os.O_CREAT | os.O_WRONLY,
0666))
return result
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, '.' + folder)
for entry in os.listdir(os.path.join(path, 'new')) + \
os.listdir(os.path.join(path, 'cur')):
if len(entry) < 1 or entry[0] != '.':
raise NotEmptyError('Folder contains message(s): %s' % folder)
for entry in os.listdir(path):
if entry != 'new' and entry != 'cur' and entry != 'tmp' and \
os.path.isdir(os.path.join(path, entry)):
raise NotEmptyError("Folder contains subdirectory '%s': %s" %
(folder, entry))
for root, dirs, files in os.walk(path, topdown=False):
for entry in files:
os.remove(os.path.join(root, entry))
for entry in dirs:
os.rmdir(os.path.join(root, entry))
os.rmdir(path)
def clean(self):
"""Delete old files in "tmp"."""
now = time.time()
for entry in os.listdir(os.path.join(self._path, 'tmp')):
path = os.path.join(self._path, 'tmp', entry)
if now - os.path.getatime(path) > 129600: # 60 * 60 * 36
os.remove(path)
_count = 1 # This is used to generate unique file names.
def _create_tmp(self):
"""Create a file in the tmp subdirectory and open and return it."""
now = time.time()
hostname = socket.gethostname()
if '/' in hostname:
hostname = hostname.replace('/', r'\057')
if ':' in hostname:
hostname = hostname.replace(':', r'\072')
uniq = "%s.M%sP%sQ%s.%s" % (int(now), int(now % 1 * 1e6), os.getpid(),
Maildir._count, hostname)
path = os.path.join(self._path, 'tmp', uniq)
try:
os.stat(path)
except OSError, e:
if e.errno == errno.ENOENT:
Maildir._count += 1
try:
return _create_carefully(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
else:
raise
# Fall through to here if stat succeeded or open raised EEXIST.
raise ExternalClashError('Name clash prevented file creation: %s' %
path)
def _refresh(self):
"""Update table of contents mapping."""
# If it has been less than two seconds since the last _refresh() call,
# we have to unconditionally re-read the mailbox just in case it has
# been modified, because os.path.mtime() has a 2 sec resolution in the
# most common worst case (FAT) and a 1 sec resolution typically. This
# results in a few unnecessary re-reads when _refresh() is called
# multiple times in that interval, but once the clock ticks over, we
# will only re-read as needed. Because the filesystem might be being
# served by an independent system with its own clock, we record and
# compare with the mtimes from the filesystem. Because the other
# system's clock might be skewing relative to our clock, we add an
# extra delta to our wait. The default is one tenth second, but is an
# instance variable and so can be adjusted if dealing with a
# particularly skewed or irregular system.
if time.time() - self._last_read > 2 + self._skewfactor:
refresh = False
for subdir in self._toc_mtimes:
mtime = os.path.getmtime(self._paths[subdir])
if mtime > self._toc_mtimes[subdir]:
refresh = True
self._toc_mtimes[subdir] = mtime
if not refresh:
return
# Refresh toc
self._toc = {}
for subdir in self._toc_mtimes:
path = self._paths[subdir]
for entry in os.listdir(path):
p = os.path.join(path, entry)
if os.path.isdir(p):
continue
uniq = entry.split(self.colon)[0]
self._toc[uniq] = os.path.join(subdir, entry)
self._last_read = time.time()
def _lookup(self, key):
"""Use TOC to return subpath for given key, or raise a KeyError."""
try:
if os.path.exists(os.path.join(self._path, self._toc[key])):
return self._toc[key]
except KeyError:
pass
self._refresh()
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
# This method is for backward compatibility only.
def next(self):
"""Return the next message in a one-time iteration."""
if not hasattr(self, '_onetime_keys'):
self._onetime_keys = self.iterkeys()
while True:
try:
return self[self._onetime_keys.next()]
except StopIteration:
return None
except KeyError:
continue
class _singlefileMailbox(Mailbox):
"""A single-file mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize a single-file mailbox."""
Mailbox.__init__(self, path, factory, create)
try:
f = open(self._path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
if create:
f = open(self._path, 'wb+')
else:
raise NoSuchMailboxError(self._path)
elif e.errno in (errno.EACCES, errno.EROFS):
f = open(self._path, 'rb')
else:
raise
self._file = f
self._toc = None
self._next_key = 0
self._pending = False # No changes require rewriting the file.
self._pending_sync = False # No need to sync the file
self._locked = False
self._file_length = None # Used to record mailbox size
def add(self, message):
"""Add message and return assigned key."""
self._lookup()
self._toc[self._next_key] = self._append_message(message)
self._next_key += 1
# _append_message appends the message to the mailbox file. We
# don't need a full rewrite + rename, sync is enough.
self._pending_sync = True
return self._next_key - 1
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
del self._toc[key]
self._pending = True
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
self._toc[key] = self._append_message(message)
self._pending = True
def iterkeys(self):
"""Return an iterator over keys."""
self._lookup()
for key in self._toc.keys():
yield key
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._lookup()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._lookup()
return len(self._toc)
def lock(self):
"""Lock the mailbox."""
if not self._locked:
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
self._locked = False
def flush(self):
"""Write any pending changes to disk."""
if not self._pending:
if self._pending_sync:
# Messages have only been added, so syncing the file
# is enough.
_sync_flush(self._file)
self._pending_sync = False
return
# In order to be writing anything out at all, self._toc must
# already have been generated (and presumably has been modified
# by adding or deleting an item).
assert self._toc is not None
# Check length of self._file; if it's changed, some other process
# has modified the mailbox since we scanned it.
self._file.seek(0, 2)
cur_len = self._file.tell()
if cur_len != self._file_length:
raise ExternalClashError('Size of mailbox file changed '
'(expected %i, found %i)' %
(self._file_length, cur_len))
new_file = _create_temporary(self._path)
try:
new_toc = {}
self._pre_mailbox_hook(new_file)
for key in sorted(self._toc.keys()):
start, stop = self._toc[key]
self._file.seek(start)
self._pre_message_hook(new_file)
new_start = new_file.tell()
while True:
buffer = self._file.read(min(4096,
stop - self._file.tell()))
if buffer == '':
break
new_file.write(buffer)
new_toc[key] = (new_start, new_file.tell())
self._post_message_hook(new_file)
self._file_length = new_file.tell()
except:
new_file.close()
os.remove(new_file.name)
raise
_sync_close(new_file)
# self._file is about to get replaced, so no need to sync.
self._file.close()
# Make sure the new file's mode is the same as the old file's
mode = os.stat(self._path).st_mode
os.chmod(new_file.name, mode)
try:
os.rename(new_file.name, self._path)
except OSError, e:
if e.errno == errno.EEXIST or \
(os.name == 'os2' and e.errno == errno.EACCES):
os.remove(self._path)
os.rename(new_file.name, self._path)
else:
raise
self._file = open(self._path, 'rb+')
self._toc = new_toc
self._pending = False
self._pending_sync = False
if self._locked:
_lock_file(self._file, dotlock=False)
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
return
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
return
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
return
def close(self):
"""Flush and close the mailbox."""
self.flush()
if self._locked:
self.unlock()
self._file.close() # Sync has been done by self.flush() above.
def _lookup(self, key=None):
"""Return (start, stop) or raise KeyError."""
if self._toc is None:
self._generate_toc()
if key is not None:
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
def _append_message(self, message):
"""Append message to mailbox and return (start, stop) offsets."""
self._file.seek(0, 2)
before = self._file.tell()
if len(self._toc) == 0 and not self._pending:
# This is the first message, and the _pre_mailbox_hook
# hasn't yet been called. If self._pending is True,
# messages have been removed, so _pre_mailbox_hook must
# have been called already.
self._pre_mailbox_hook(self._file)
try:
self._pre_message_hook(self._file)
offsets = self._install_message(message)
self._post_message_hook(self._file)
except BaseException:
self._file.truncate(before)
raise
self._file.flush()
self._file_length = self._file.tell() # Record current length of mailbox
return offsets
class _mboxMMDF(_singlefileMailbox):
"""An mbox or MMDF mailbox."""
_mangle_from_ = True
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
from_line = self._file.readline().replace(os.linesep, '')
string = self._file.read(stop - self._file.tell())
msg = self._message_factory(string.replace(os.linesep, '\n'))
msg.set_from(from_line[5:])
return msg
def get_string(self, key, from_=False):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
string = self._file.read(stop - self._file.tell())
return string.replace(os.linesep, '\n')
def get_file(self, key, from_=False):
"""Return a file-like representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
return _PartialFile(self._file, self._file.tell(), stop)
def _install_message(self, message):
"""Format a message and blindly write to self._file."""
from_line = None
if isinstance(message, str) and message.startswith('From '):
newline = message.find('\n')
if newline != -1:
from_line = message[:newline]
message = message[newline + 1:]
else:
from_line = message
message = ''
elif isinstance(message, _mboxMMDFMessage):
from_line = 'From ' + message.get_from()
elif isinstance(message, email.message.Message):
from_line = message.get_unixfrom() # May be None.
if from_line is None:
from_line = 'From MAILER-DAEMON %s' % time.asctime(time.gmtime())
start = self._file.tell()
self._file.write(from_line + os.linesep)
self._dump_message(message, self._file, self._mangle_from_)
stop = self._file.tell()
return (start, stop)
class mbox(_mboxMMDF):
"""A classic mbox mailbox."""
_mangle_from_ = True
# All messages must end in a newline character, and
# _post_message_hooks outputs an empty line between messages.
_append_newline = True
def __init__(self, path, factory=None, create=True):
"""Initialize an mbox mailbox."""
self._message_factory = mboxMessage
_mboxMMDF.__init__(self, path, factory, create)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(os.linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
last_was_empty = False
self._file.seek(0)
while True:
line_pos = self._file.tell()
line = self._file.readline()
if line.startswith('From '):
if len(stops) < len(starts):
if last_was_empty:
stops.append(line_pos - len(os.linesep))
else:
# The last line before the "From " line wasn't
# blank, but we consider it a start of a
# message anyway.
stops.append(line_pos)
starts.append(line_pos)
last_was_empty = False
elif not line:
if last_was_empty:
stops.append(line_pos - len(os.linesep))
else:
stops.append(line_pos)
break
elif line == os.linesep:
last_was_empty = True
else:
last_was_empty = False
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file_length = self._file.tell()
class MMDF(_mboxMMDF):
"""An MMDF mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MMDF mailbox."""
self._message_factory = MMDFMessage
_mboxMMDF.__init__(self, path, factory, create)
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write('\001\001\001\001' + os.linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(os.linesep + '\001\001\001\001' + os.linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line.startswith('\001\001\001\001' + os.linesep):
starts.append(next_pos)
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == '\001\001\001\001' + os.linesep:
stops.append(line_pos - len(os.linesep))
break
elif line == '':
stops.append(line_pos)
break
elif line == '':
break
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
class MH(Mailbox):
"""An MH mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MH instance."""
Mailbox.__init__(self, path, factory, create)
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0700)
os.close(os.open(os.path.join(self._path, '.mh_sequences'),
os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0600))
else:
raise NoSuchMailboxError(self._path)
self._locked = False
def add(self, message):
"""Add message and return assigned key."""
keys = self.keys()
if len(keys) == 0:
new_key = 1
else:
new_key = max(keys) + 1
new_path = os.path.join(self._path, str(new_key))
f = _create_carefully(new_path)
closed = False
try:
if self._locked:
_lock_file(f)
try:
try:
self._dump_message(message, f)
except BaseException:
# Unlock and close so it can be deleted on Windows
if self._locked:
_unlock_file(f)
_sync_close(f)
closed = True
os.remove(new_path)
raise
if isinstance(message, MHMessage):
self._dump_sequences(message, new_key)
finally:
if self._locked:
_unlock_file(f)
finally:
if not closed:
_sync_close(f)
return new_key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
else:
f.close()
os.remove(path)
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
os.close(os.open(path, os.O_WRONLY | os.O_TRUNC))
self._dump_message(message, f)
if isinstance(message, MHMessage):
self._dump_sequences(message, key)
finally:
if self._locked:
_unlock_file(f)
finally:
_sync_close(f)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'r+')
else:
f = open(os.path.join(self._path, str(key)), 'r')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
msg = MHMessage(f)
finally:
if self._locked:
_unlock_file(f)
finally:
f.close()
for name, key_list in self.get_sequences().iteritems():
if key in key_list:
msg.add_sequence(name)
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'r+')
else:
f = open(os.path.join(self._path, str(key)), 'r')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
return f.read()
finally:
if self._locked:
_unlock_file(f)
finally:
f.close()
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
try:
f = open(os.path.join(self._path, str(key)), 'rb')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
return iter(sorted(int(entry) for entry in os.listdir(self._path)
if entry.isdigit()))
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
return os.path.exists(os.path.join(self._path, str(key)))
def __len__(self):
"""Return a count of messages in the mailbox."""
return len(list(self.iterkeys()))
def lock(self):
"""Lock the mailbox."""
if not self._locked:
self._file = open(os.path.join(self._path, '.mh_sequences'), 'rb+')
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
_sync_close(self._file)
del self._file
self._locked = False
def flush(self):
"""Write any pending changes to the disk."""
return
def close(self):
"""Flush and close the mailbox."""
if self._locked:
self.unlock()
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if os.path.isdir(os.path.join(self._path, entry)):
result.append(entry)
return result
def get_folder(self, folder):
"""Return an MH instance for the named folder."""
return MH(os.path.join(self._path, folder),
factory=self._factory, create=False)
def add_folder(self, folder):
"""Create a folder and return an MH instance representing it."""
return MH(os.path.join(self._path, folder),
factory=self._factory)
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, folder)
entries = os.listdir(path)
if entries == ['.mh_sequences']:
os.remove(os.path.join(path, '.mh_sequences'))
elif entries == []:
pass
else:
raise NotEmptyError('Folder not empty: %s' % self._path)
os.rmdir(path)
def get_sequences(self):
"""Return a name-to-key-list dictionary to define each sequence."""
results = {}
f = open(os.path.join(self._path, '.mh_sequences'), 'r')
try:
all_keys = set(self.keys())
for line in f:
try:
name, contents = line.split(':')
keys = set()
for spec in contents.split():
if spec.isdigit():
keys.add(int(spec))
else:
start, stop = (int(x) for x in spec.split('-'))
keys.update(range(start, stop + 1))
results[name] = [key for key in sorted(keys) \
if key in all_keys]
if len(results[name]) == 0:
del results[name]
except ValueError:
raise FormatError('Invalid sequence specification: %s' %
line.rstrip())
finally:
f.close()
return results
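    # For illustration (hypothetical file contents, not part of the original
    # module): a .mh_sequences line such as
    #     flagged: 1-3 7
    # parses to {'flagged': [1, 2, 3, 7]}, filtered against the existing keys.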
def set_sequences(self, sequences):
"""Set sequences using the given name-to-key-list dictionary."""
f = open(os.path.join(self._path, '.mh_sequences'), 'r+')
try:
os.close(os.open(f.name, os.O_WRONLY | os.O_TRUNC))
for name, keys in sequences.iteritems():
if len(keys) == 0:
continue
f.write('%s:' % name)
prev = None
completing = False
for key in sorted(set(keys)):
if key - 1 == prev:
if not completing:
completing = True
f.write('-')
elif completing:
completing = False
f.write('%s %s' % (prev, key))
else:
f.write(' %s' % key)
prev = key
if completing:
f.write(str(prev) + '\n')
else:
f.write('\n')
finally:
_sync_close(f)
def pack(self):
"""Re-name messages to eliminate numbering gaps. Invalidates keys."""
sequences = self.get_sequences()
prev = 0
changes = []
for key in self.iterkeys():
if key - 1 != prev:
changes.append((key, prev + 1))
if hasattr(os, 'link'):
os.link(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
os.unlink(os.path.join(self._path, str(key)))
else:
os.rename(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
prev += 1
self._next_key = prev + 1
if len(changes) == 0:
return
for name, key_list in sequences.items():
for old, new in changes:
if old in key_list:
key_list[key_list.index(old)] = new
self.set_sequences(sequences)
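    # For illustration (hypothetical keys, not part of the original module):
    # with messages 1, 3 and 5 on disk, pack() renames 3 -> 2 and 5 -> 3,
    # leaving the gapless keys 1, 2, 3 and rewriting sequences accordingly.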
def _dump_sequences(self, message, key):
"""Inspect a new MHMessage and update sequences appropriately."""
pending_sequences = message.get_sequences()
all_sequences = self.get_sequences()
for name, key_list in all_sequences.iteritems():
if name in pending_sequences:
key_list.append(key)
elif key in key_list:
del key_list[key_list.index(key)]
for sequence in pending_sequences:
if sequence not in all_sequences:
all_sequences[sequence] = [key]
self.set_sequences(all_sequences)
class Babyl(_singlefileMailbox):
"""An Rmail-style Babyl mailbox."""
_special_labels = frozenset(('unseen', 'deleted', 'filed', 'answered',
'forwarded', 'edited', 'resent'))
def __init__(self, path, factory=None, create=True):
"""Initialize a Babyl mailbox."""
_singlefileMailbox.__init__(self, path, factory, create)
self._labels = {}
def add(self, message):
"""Add message and return assigned key."""
key = _singlefileMailbox.add(self, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
return key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.remove(self, key)
if key in self._labels:
del self._labels[key]
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.__setitem__(self, key, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip '1,' line specifying labels.
original_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == '*** EOOH ***' + os.linesep or line == '':
break
original_headers.write(line.replace(os.linesep, '\n'))
visible_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == os.linesep or line == '':
break
visible_headers.write(line.replace(os.linesep, '\n'))
body = self._file.read(stop - self._file.tell()).replace(os.linesep,
'\n')
msg = BabylMessage(original_headers.getvalue() + body)
msg.set_visible(visible_headers.getvalue())
if key in self._labels:
msg.set_labels(self._labels[key])
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip '1,' line specifying labels.
original_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == '*** EOOH ***' + os.linesep or line == '':
break
original_headers.write(line.replace(os.linesep, '\n'))
while True:
line = self._file.readline()
if line == os.linesep or line == '':
break
return original_headers.getvalue() + \
self._file.read(stop - self._file.tell()).replace(os.linesep,
'\n')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
return StringIO.StringIO(self.get_string(key).replace('\n',
os.linesep))
def get_labels(self):
"""Return a list of user-defined labels in the mailbox."""
self._lookup()
labels = set()
for label_list in self._labels.values():
labels.update(label_list)
labels.difference_update(self._special_labels)
return list(labels)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
label_lists = []
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == '\037\014' + os.linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(os.linesep))
starts.append(next_pos)
labels = [label.strip() for label
in self._file.readline()[1:].split(',')
if label.strip() != '']
label_lists.append(labels)
elif line == '\037' or line == '\037' + os.linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(os.linesep))
elif line == '':
stops.append(line_pos - len(os.linesep))
break
self._toc = dict(enumerate(zip(starts, stops)))
self._labels = dict(enumerate(label_lists))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
f.write('BABYL OPTIONS:%sVersion: 5%sLabels:%s%s\037' %
(os.linesep, os.linesep, ','.join(self.get_labels()),
os.linesep))
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write('\014' + os.linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(os.linesep + '\037')
def _install_message(self, message):
"""Write message contents and return (start, stop)."""
start = self._file.tell()
if isinstance(message, BabylMessage):
special_labels = []
labels = []
for label in message.get_labels():
if label in self._special_labels:
special_labels.append(label)
else:
labels.append(label)
self._file.write('1')
for label in special_labels:
self._file.write(', ' + label)
self._file.write(',,')
for label in labels:
self._file.write(' ' + label + ',')
self._file.write(os.linesep)
else:
self._file.write('1,,' + os.linesep)
if isinstance(message, email.message.Message):
orig_buffer = StringIO.StringIO()
orig_generator = email.generator.Generator(orig_buffer, False, 0)
orig_generator.flatten(message)
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
self._file.write('*** EOOH ***' + os.linesep)
if isinstance(message, BabylMessage):
vis_buffer = StringIO.StringIO()
vis_generator = email.generator.Generator(vis_buffer, False, 0)
vis_generator.flatten(message.get_visible())
while True:
line = vis_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
else:
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
while True:
buffer = orig_buffer.read(4096) # Buffer size is arbitrary.
if buffer == '':
break
self._file.write(buffer.replace('\n', os.linesep))
elif isinstance(message, str):
body_start = message.find('\n\n') + 2
            if body_start - 2 != -1:  # i.e. '\n\n' was found in message
self._file.write(message[:body_start].replace('\n',
os.linesep))
self._file.write('*** EOOH ***' + os.linesep)
self._file.write(message[:body_start].replace('\n',
os.linesep))
self._file.write(message[body_start:].replace('\n',
os.linesep))
else:
self._file.write('*** EOOH ***' + os.linesep + os.linesep)
self._file.write(message.replace('\n', os.linesep))
elif hasattr(message, 'readline'):
original_pos = message.tell()
first_pass = True
while True:
line = message.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
if first_pass:
first_pass = False
self._file.write('*** EOOH ***' + os.linesep)
message.seek(original_pos)
else:
break
while True:
buffer = message.read(4096) # Buffer size is arbitrary.
if buffer == '':
break
self._file.write(buffer.replace('\n', os.linesep))
else:
raise TypeError('Invalid message type: %s' % type(message))
stop = self._file.tell()
return (start, stop)
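# Illustrative sketch (not part of the original module): the Babyl layout that
# _generate_toc() and _install_message() work with. Each message is framed by
# control characters -- '\037\014' opens it, a '1, special,, user,' line
# carries its labels, '*** EOOH ***' separates the original headers from the
# visible headers and body, and a lone '\037' closes the mailbox:
#
#     BABYL OPTIONS:
#     Version: 5
#     Labels: ...
#     \037\014
#     1, answered,, important,
#     <original headers>
#     *** EOOH ***
#     <visible headers>
#
#     <message body>
#     \037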
class Message(email.message.Message):
"""Message with mailbox-format-specific properties."""
def __init__(self, message=None):
"""Initialize a Message instance."""
if isinstance(message, email.message.Message):
self._become_message(copy.deepcopy(message))
if isinstance(message, Message):
message._explain_to(self)
elif isinstance(message, str):
self._become_message(email.message_from_string(message))
elif hasattr(message, "read"):
self._become_message(email.message_from_file(message))
elif message is None:
email.message.Message.__init__(self)
else:
raise TypeError('Invalid message type: %s' % type(message))
def _become_message(self, message):
"""Assume the non-format-specific state of message."""
for name in ('_headers', '_unixfrom', '_payload', '_charset',
'preamble', 'epilogue', 'defects', '_default_type'):
self.__dict__[name] = message.__dict__[name]
def _explain_to(self, message):
"""Copy format-specific state to message insofar as possible."""
if isinstance(message, Message):
return # There's nothing format-specific to explain.
else:
raise TypeError('Cannot convert to specified type')
class MaildirMessage(Message):
"""Message with Maildir-specific properties."""
def __init__(self, message=None):
"""Initialize a MaildirMessage instance."""
self._subdir = 'new'
self._info = ''
self._date = time.time()
Message.__init__(self, message)
def get_subdir(self):
"""Return 'new' or 'cur'."""
return self._subdir
def set_subdir(self, subdir):
"""Set subdir to 'new' or 'cur'."""
if subdir == 'new' or subdir == 'cur':
self._subdir = subdir
else:
raise ValueError("subdir must be 'new' or 'cur': %s" % subdir)
def get_flags(self):
"""Return as a string the flags that are set."""
if self._info.startswith('2,'):
return self._info[2:]
else:
return ''
def set_flags(self, flags):
"""Set the given flags and unset all others."""
self._info = '2,' + ''.join(sorted(flags))
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if self.get_flags() != '':
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def get_date(self):
"""Return delivery date of message, in seconds since the epoch."""
return self._date
def set_date(self, date):
"""Set delivery date of message, in seconds since the epoch."""
try:
self._date = float(date)
except ValueError:
raise TypeError("can't convert to float: %s" % date)
def get_info(self):
"""Get the message's "info" as a string."""
return self._info
def set_info(self, info):
"""Set the message's "info" string."""
if isinstance(info, str):
self._info = info
else:
raise TypeError('info must be a string: %s' % type(info))
def _explain_to(self, message):
"""Copy Maildir-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
message.set_flags(self.get_flags())
message.set_subdir(self.get_subdir())
message.set_date(self.get_date())
elif isinstance(message, _mboxMMDFMessage):
flags = set(self.get_flags())
if 'S' in flags:
message.add_flag('R')
if self.get_subdir() == 'cur':
message.add_flag('O')
if 'T' in flags:
message.add_flag('D')
if 'F' in flags:
message.add_flag('F')
if 'R' in flags:
message.add_flag('A')
message.set_from('MAILER-DAEMON', time.gmtime(self.get_date()))
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_sequence('unseen')
if 'R' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_label('unseen')
if 'T' in flags:
message.add_label('deleted')
if 'R' in flags:
message.add_label('answered')
if 'P' in flags:
message.add_label('forwarded')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
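# Illustrative sketch (not part of the original module): how MaildirMessage
# encodes state in the 'info' part of a Maildir filename. Flags follow a '2,'
# prefix and are kept sorted.
def _maildir_info_example():
    """Round-trip two Maildir flags; returns the info string '2,FS'."""
    msg = MaildirMessage()
    msg.set_flags('S')          # mark seen
    msg.add_flag('F')           # additionally mark flagged
    assert msg.get_flags() == 'FS'
    return msg.get_info()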
class _mboxMMDFMessage(Message):
"""Message with mbox- or MMDF-specific properties."""
def __init__(self, message=None):
"""Initialize an mboxMMDFMessage instance."""
self.set_from('MAILER-DAEMON', True)
if isinstance(message, email.message.Message):
unixfrom = message.get_unixfrom()
if unixfrom is not None and unixfrom.startswith('From '):
self.set_from(unixfrom[5:])
Message.__init__(self, message)
def get_from(self):
"""Return contents of "From " line."""
return self._from
def set_from(self, from_, time_=None):
"""Set "From " line, formatting and appending time_ if specified."""
if time_ is not None:
if time_ is True:
time_ = time.gmtime()
from_ += ' ' + time.asctime(time_)
self._from = from_
def get_flags(self):
"""Return as a string the flags that are set."""
return self.get('Status', '') + self.get('X-Status', '')
def set_flags(self, flags):
"""Set the given flags and unset all others."""
flags = set(flags)
status_flags, xstatus_flags = '', ''
for flag in ('R', 'O'):
if flag in flags:
status_flags += flag
flags.remove(flag)
for flag in ('D', 'F', 'A'):
if flag in flags:
xstatus_flags += flag
flags.remove(flag)
xstatus_flags += ''.join(sorted(flags))
try:
self.replace_header('Status', status_flags)
except KeyError:
self.add_header('Status', status_flags)
try:
self.replace_header('X-Status', xstatus_flags)
except KeyError:
self.add_header('X-Status', xstatus_flags)
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if 'Status' in self or 'X-Status' in self:
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def _explain_to(self, message):
"""Copy mbox- or MMDF-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
flags = set(self.get_flags())
if 'O' in flags:
message.set_subdir('cur')
if 'F' in flags:
message.add_flag('F')
if 'A' in flags:
message.add_flag('R')
if 'R' in flags:
message.add_flag('S')
if 'D' in flags:
message.add_flag('T')
del message['status']
del message['x-status']
maybe_date = ' '.join(self.get_from().split()[-5:])
try:
message.set_date(calendar.timegm(time.strptime(maybe_date,
'%a %b %d %H:%M:%S %Y')))
except (ValueError, OverflowError):
pass
elif isinstance(message, _mboxMMDFMessage):
message.set_flags(self.get_flags())
message.set_from(self.get_from())
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_sequence('unseen')
if 'A' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
del message['status']
del message['x-status']
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_label('unseen')
if 'D' in flags:
message.add_label('deleted')
if 'A' in flags:
message.add_label('answered')
del message['status']
del message['x-status']
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
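# Illustrative sketch (not part of the original module): the header split that
# _mboxMMDFMessage.set_flags() performs. 'R' and 'O' land in Status:, while
# 'D', 'F' and 'A' (plus anything unrecognized) land in X-Status:.
def _mbox_flag_headers_example():
    """Returns ('RO', 'FA') for a read, old, flagged, answered message."""
    msg = _mboxMMDFMessage()
    msg.set_flags('ROFA')
    return msg['Status'], msg['X-Status']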
class mboxMessage(_mboxMMDFMessage):
"""Message with mbox-specific properties."""
class MHMessage(Message):
"""Message with MH-specific properties."""
def __init__(self, message=None):
"""Initialize an MHMessage instance."""
self._sequences = []
Message.__init__(self, message)
def get_sequences(self):
"""Return a list of sequences that include the message."""
return self._sequences[:]
def set_sequences(self, sequences):
"""Set the list of sequences that include the message."""
self._sequences = list(sequences)
def add_sequence(self, sequence):
"""Add sequence to list of sequences including the message."""
if isinstance(sequence, str):
            if sequence not in self._sequences:
self._sequences.append(sequence)
else:
raise TypeError('sequence must be a string: %s' % type(sequence))
def remove_sequence(self, sequence):
"""Remove sequence from the list of sequences including the message."""
try:
self._sequences.remove(sequence)
except ValueError:
pass
def _explain_to(self, message):
"""Copy MH-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('R')
elif isinstance(message, _mboxMMDFMessage):
sequences = set(self.get_sequences())
if 'unseen' not in sequences:
message.add_flag('RO')
else:
message.add_flag('O')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('A')
elif isinstance(message, MHMessage):
for sequence in self.get_sequences():
message.add_sequence(sequence)
elif isinstance(message, BabylMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.add_label('unseen')
if 'replied' in sequences:
message.add_label('answered')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
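# Illustrative sketch (not part of the original module): format conversion via
# _explain_to(). Initializing one message type from another copies what state
# it can -- here a flagged, unread Maildir message becomes an MH message in
# the 'unseen' and 'flagged' sequences.
def _maildir_to_mh_example():
    """Returns ['unseen', 'flagged']."""
    source = MaildirMessage()
    source.set_flags('F')               # flagged; no 'S' means unread
    target = MHMessage(source)          # Message.__init__ runs _explain_to()
    return target.get_sequences()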
class BabylMessage(Message):
"""Message with Babyl-specific properties."""
    def __init__(self, message=None):
        """Initialize a BabylMessage instance."""
self._labels = []
self._visible = Message()
Message.__init__(self, message)
def get_labels(self):
"""Return a list of labels on the message."""
return self._labels[:]
def set_labels(self, labels):
"""Set the list of labels on the message."""
self._labels = list(labels)
def add_label(self, label):
"""Add label to list of labels on the message."""
if isinstance(label, str):
if label not in self._labels:
self._labels.append(label)
else:
raise TypeError('label must be a string: %s' % type(label))
def remove_label(self, label):
"""Remove label from the list of labels on the message."""
try:
self._labels.remove(label)
except ValueError:
pass
def get_visible(self):
"""Return a Message representation of visible headers."""
return Message(self._visible)
def set_visible(self, visible):
"""Set the Message representation of visible headers."""
self._visible = Message(visible)
def update_visible(self):
"""Update and/or sensibly generate a set of visible headers."""
for header in self._visible.keys():
if header in self:
self._visible.replace_header(header, self[header])
else:
del self._visible[header]
for header in ('Date', 'From', 'Reply-To', 'To', 'CC', 'Subject'):
if header in self and header not in self._visible:
self._visible[header] = self[header]
def _explain_to(self, message):
"""Copy Babyl-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'forwarded' in labels or 'resent' in labels:
message.add_flag('P')
if 'answered' in labels:
message.add_flag('R')
if 'deleted' in labels:
message.add_flag('T')
elif isinstance(message, _mboxMMDFMessage):
labels = set(self.get_labels())
if 'unseen' not in labels:
message.add_flag('RO')
else:
message.add_flag('O')
if 'deleted' in labels:
message.add_flag('D')
if 'answered' in labels:
message.add_flag('A')
elif isinstance(message, MHMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.add_sequence('unseen')
if 'answered' in labels:
message.add_sequence('replied')
elif isinstance(message, BabylMessage):
message.set_visible(self.get_visible())
for label in self.get_labels():
message.add_label(label)
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class MMDFMessage(_mboxMMDFMessage):
"""Message with MMDF-specific properties."""
class _ProxyFile:
"""A read-only wrapper of a file."""
def __init__(self, f, pos=None):
"""Initialize a _ProxyFile."""
self._file = f
if pos is None:
self._pos = f.tell()
else:
self._pos = pos
def read(self, size=None):
"""Read bytes."""
return self._read(size, self._file.read)
def readline(self, size=None):
"""Read a line."""
return self._read(size, self._file.readline)
def readlines(self, sizehint=None):
"""Read multiple lines."""
result = []
for line in self:
result.append(line)
if sizehint is not None:
sizehint -= len(line)
if sizehint <= 0:
break
return result
def __iter__(self):
"""Iterate over lines."""
return iter(self.readline, "")
def tell(self):
"""Return the position."""
return self._pos
def seek(self, offset, whence=0):
"""Change position."""
if whence == 1:
self._file.seek(self._pos)
self._file.seek(offset, whence)
self._pos = self._file.tell()
def close(self):
"""Close the file."""
if hasattr(self, '_file'):
if hasattr(self._file, 'close'):
self._file.close()
del self._file
def _read(self, size, read_method):
"""Read size bytes using read_method."""
if size is None:
size = -1
self._file.seek(self._pos)
result = read_method(size)
self._pos = self._file.tell()
return result
class _PartialFile(_ProxyFile):
"""A read-only wrapper of part of a file."""
def __init__(self, f, start=None, stop=None):
"""Initialize a _PartialFile."""
_ProxyFile.__init__(self, f, start)
self._start = start
self._stop = stop
def tell(self):
"""Return the position with respect to start."""
return _ProxyFile.tell(self) - self._start
def seek(self, offset, whence=0):
"""Change position, possibly with respect to start or stop."""
if whence == 0:
self._pos = self._start
whence = 1
elif whence == 2:
self._pos = self._stop
whence = 1
_ProxyFile.seek(self, offset, whence)
def _read(self, size, read_method):
"""Read size bytes using read_method, honoring start and stop."""
remaining = self._stop - self._pos
if remaining <= 0:
return ''
if size is None or size < 0 or size > remaining:
size = remaining
return _ProxyFile._read(self, size, read_method)
def close(self):
# do *not* close the underlying file object for partial files,
# since it's global to the mailbox object
if hasattr(self, '_file'):
del self._file
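# Illustrative sketch (not part of the original module): _PartialFile exposes
# a byte window [start, stop) over a file shared by the whole mailbox object;
# positions are reported relative to start and reads are clipped at stop.
def _partial_file_example():
    """Returns 'world' from a five-byte window over an in-memory file."""
    f = StringIO.StringIO('hello world!')
    part = _PartialFile(f, start=6, stop=11)
    assert part.tell() == 0     # offsets are relative to start
    return part.read()          # read is clipped at stop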
def _lock_file(f, dotlock=True):
"""Lock file f using lockf and dot locking."""
dotlock_done = False
try:
if fcntl:
try:
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError, e:
if e.errno in (errno.EAGAIN, errno.EACCES, errno.EROFS):
raise ExternalClashError('lockf: lock unavailable: %s' %
f.name)
else:
raise
if dotlock:
try:
pre_lock = _create_temporary(f.name + '.lock')
pre_lock.close()
except IOError, e:
if e.errno in (errno.EACCES, errno.EROFS):
return # Without write access, just skip dotlocking.
else:
raise
try:
if hasattr(os, 'link'):
os.link(pre_lock.name, f.name + '.lock')
dotlock_done = True
os.unlink(pre_lock.name)
else:
os.rename(pre_lock.name, f.name + '.lock')
dotlock_done = True
except OSError, e:
if e.errno == errno.EEXIST or \
(os.name == 'os2' and e.errno == errno.EACCES):
os.remove(pre_lock.name)
raise ExternalClashError('dot lock unavailable: %s' %
f.name)
else:
raise
except:
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if dotlock_done:
os.remove(f.name + '.lock')
raise
def _unlock_file(f):
"""Unlock file f using lockf and dot locking."""
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if os.path.exists(f.name + '.lock'):
os.remove(f.name + '.lock')
def _create_carefully(path):
"""Create a file if it doesn't exist and open for reading and writing."""
fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0666)
try:
return open(path, 'rb+')
finally:
os.close(fd)
def _create_temporary(path):
"""Create a temp file based on path and open for reading and writing."""
return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()),
socket.gethostname(),
os.getpid()))
def _sync_flush(f):
"""Ensure changes to file f are physically on disk."""
f.flush()
if hasattr(os, 'fsync'):
os.fsync(f.fileno())
def _sync_close(f):
"""Close file f, ensuring all changes are physically on disk."""
_sync_flush(f)
f.close()
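# Illustrative sketch (not part of the original module): how the locking and
# sync helpers above are typically paired around an in-place mbox update. The
# function and its arguments are hypothetical.
def _locked_update_example(path, data):
    """Append data to an mbox-style file under lockf and dot locking."""
    f = open(path, 'rb+')
    try:
        _lock_file(f)           # lockf lock plus a path + '.lock' dot lock
        try:
            f.seek(0, 2)        # append at the end of the file
            f.write(data)
            _sync_flush(f)      # flush() and fsync() so data reaches disk
        finally:
            _unlock_file(f)     # drop the lockf lock and remove the dot lock
    finally:
        f.close()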
## Start: classes from the original module (for backward compatibility).
# Note that the Maildir class, whose name is unchanged, itself offers a next()
# method for backward compatibility.
class _Mailbox:
def __init__(self, fp, factory=rfc822.Message):
self.fp = fp
self.seekp = 0
self.factory = factory
def __iter__(self):
return iter(self.next, None)
def next(self):
while 1:
self.fp.seek(self.seekp)
try:
self._search_start()
except EOFError:
self.seekp = self.fp.tell()
return None
start = self.fp.tell()
self._search_end()
self.seekp = stop = self.fp.tell()
if start != stop:
break
return self.factory(_PartialFile(self.fp, start, stop))
# Recommended to use PortableUnixMailbox instead!
class UnixMailbox(_Mailbox):
def _search_start(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
raise EOFError
if line[:5] == 'From ' and self._isrealfromline(line):
self.fp.seek(pos)
return
def _search_end(self):
self.fp.readline() # Throw away header line
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line[:5] == 'From ' and self._isrealfromline(line):
self.fp.seek(pos)
return
# An overridable mechanism to test for From-line-ness. You can either
# specify a different regular expression or define a whole new
# _isrealfromline() method. Note that this only gets called for lines
# starting with the 5 characters "From ".
#
# BAW: According to
    # http://home.netscape.com/eng/mozilla/2.0/relnotes/demo/content-length.html
# the only portable, reliable way to find message delimiters in a BSD (i.e
# Unix mailbox) style folder is to search for "\n\nFrom .*\n", or at the
# beginning of the file, "^From .*\n". While _fromlinepattern below seems
# like a good idea, in practice, there are too many variations for more
# strict parsing of the line to be completely accurate.
#
# _strict_isrealfromline() is the old version which tries to do stricter
# parsing of the From_ line. _portable_isrealfromline() simply returns
# true, since it's never called if the line doesn't already start with
# "From ".
#
# This algorithm, and the way it interacts with _search_start() and
# _search_end() may not be completely correct, because it doesn't check
# that the two characters preceding "From " are \n\n or the beginning of
# the file. Fixing this would require a more extensive rewrite than is
# necessary. For convenience, we've added a PortableUnixMailbox class
# which does no checking of the format of the 'From' line.
_fromlinepattern = (r"From \s*[^\s]+\s+\w\w\w\s+\w\w\w\s+\d?\d\s+"
r"\d?\d:\d\d(:\d\d)?(\s+[^\s]+)?\s+\d\d\d\d\s*"
r"[^\s]*\s*"
"$")
_regexp = None
def _strict_isrealfromline(self, line):
if not self._regexp:
import re
self._regexp = re.compile(self._fromlinepattern)
return self._regexp.match(line)
def _portable_isrealfromline(self, line):
return True
_isrealfromline = _strict_isrealfromline
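    # Illustrative examples (not part of the original module) of how the
    # strict test behaves; the addresses are made up:
    #
    #     'From user@example.com  Fri Jul  8 12:34:56 2011\n'  -> accepted
    #     'From jdoe Mon Jan  1 00:00 2001\n'                  -> accepted
    #     'From someone forgot the date\n'                     -> rejected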
class PortableUnixMailbox(UnixMailbox):
_isrealfromline = UnixMailbox._portable_isrealfromline
class MmdfMailbox(_Mailbox):
def _search_start(self):
while 1:
line = self.fp.readline()
if not line:
raise EOFError
if line[:5] == '\001\001\001\001\n':
return
def _search_end(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line == '\001\001\001\001\n':
self.fp.seek(pos)
return
class MHMailbox:
def __init__(self, dirname, factory=rfc822.Message):
import re
pat = re.compile('^[1-9][0-9]*$')
self.dirname = dirname
# the three following lines could be combined into:
# list = map(long, filter(pat.match, os.listdir(self.dirname)))
list = os.listdir(self.dirname)
list = filter(pat.match, list)
list = map(long, list)
list.sort()
# This only works in Python 1.6 or later;
# before that str() added 'L':
self.boxes = map(str, list)
self.boxes.reverse()
self.factory = factory
def __iter__(self):
return iter(self.next, None)
def next(self):
if not self.boxes:
return None
fn = self.boxes.pop()
fp = open(os.path.join(self.dirname, fn))
msg = self.factory(fp)
try:
msg._mh_msgno = fn
except (AttributeError, TypeError):
pass
return msg
class BabylMailbox(_Mailbox):
def _search_start(self):
while 1:
line = self.fp.readline()
if not line:
raise EOFError
if line == '*** EOOH ***\n':
return
def _search_end(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line == '\037\014\n' or line == '\037':
self.fp.seek(pos)
return
## End: classes from the original module (for backward compatibility).
class Error(Exception):
"""Raised for module-specific errors."""
class NoSuchMailboxError(Error):
"""The specified mailbox does not exist and won't be created."""
class NotEmptyError(Error):
"""The specified mailbox is not empty and deletion was requested."""
class ExternalClashError(Error):
"""Another process caused an action to fail."""
class FormatError(Error):
"""A file appears to have an invalid format."""
| gpl-2.0 |
DirkWilhelmi/mikro_tms | rpi_functions.py | 1 | 1501 | import ConfigParser
from r515.basic_functions import BasicFunctions
from r515.basic_settings import BasicSettings
from r515.connection import Connection
config = ConfigParser.ConfigParser()
config.read(['r515.cfg'])
conn = Connection(config.get('Connection', 'IP'), config.get('Connection', 'USR'), config.get('Connection', 'PWD'))
prj = BasicFunctions(conn)
settings = BasicSettings(conn)
def lamp_on():
prj.power_on()
def lamp_off():
prj.power_standby()
def lamp_eighty():
settings.set_lamp_power(80.0)
def lamp_hundred():
settings.set_lamp_power(100.0)
def stop():
prj.stop()
prj.close_douser()
def pause():
prj.pause()
def play():
prj.play()
def dci_flat():
settings.load_format(1)
def dci_cs():
settings.load_format(2)
def hdmi_flat():
settings.load_format(3)
def hdmi_cs():
settings.load_format(4)
def dowser_close():
prj.close_douser()
def dowser_open():
prj.open_douser()
def start_zoom_positive():
settings.start_zoom()
def stop_zoom_positive():
settings.stop_zoom()
def start_zoom_down():
settings.start_zoom('down')
def stop_zoom_down():
settings.stop_zoom('down')
def start_focus_positive():
settings.start_focus()
def stop_focus_positive():
settings.stop_focus()
def start_focus_down():
settings.start_focus('down')
def stop_focus_down():
settings.stop_focus('down')
def is_hdmi():
return settings.get_projector_settings()['media_block'] == 'UNKNOWN' | gpl-2.0 |
skoslowski/gnuradio | gr-blocks/python/blocks/qa_logger.py | 4 | 3225 | #!/usr/bin/env python
#
# Copyright 2016 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, blocks
class test_logger (gr_unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def set_and_assert_log_level(self, block, level):
block.set_log_level(level)
self.assertEqual(block.log_level(), level)
def test_log_level_for_block(self):
        # Test the python API for getting and setting log levels of individual blocks
# go through all of the documented log_levels
ns = blocks.null_source(1)
self.set_and_assert_log_level(ns, "notset")
self.set_and_assert_log_level(ns, "debug")
self.set_and_assert_log_level(ns, "info")
self.set_and_assert_log_level(ns, "notice")
self.set_and_assert_log_level(ns, "warn")
self.set_and_assert_log_level(ns, "error")
self.set_and_assert_log_level(ns, "crit")
self.set_and_assert_log_level(ns, "alert")
self.set_and_assert_log_level(ns, "emerg")
        # There are a couple of special cases. "off" == "notset" (specific to gr)
        # and "fatal" == "emerg" (built into log4cpp)
ns.set_log_level("off")
self.assertEqual(ns.log_level(), "notset")
ns.set_log_level("fatal")
self.assertEqual(ns.log_level(), "emerg")
        # Make sure an exception is thrown on bogus data
self.assertRaises(RuntimeError, ns.set_log_level, "11")
def test_log_level_for_tb(self):
# Test the python API for getting and setting log levels for a top_block
nsrc = blocks.null_source(4)
nsnk = blocks.null_sink(4)
# Set all log levels to a known state
nsrc.set_log_level("debug")
nsnk.set_log_level("debug")
tb = gr.top_block()
tb.connect(nsrc, nsnk)
# confirm that the tb has log_level of first block
self.assertEqual(tb.log_level(), "debug")
# confirm that changing tb log_level propagates to connected blocks
tb.set_log_level("alert")
self.assertEqual(tb.log_level(), "alert")
self.assertEqual(nsrc.log_level(), "alert")
self.assertEqual(nsnk.log_level(), "alert")
def test_log_level_for_hier_block(self):
# Test the python API for getting and setting log levels for hier blocks
nsrc = blocks.null_source(4)
nsnk = blocks.null_sink(4)
b = blocks.stream_to_vector_decimator(4, 1, 1, 1) # just a random hier block that exists
tb = gr.top_block()
tb.connect(nsrc, b, nsnk)
tb.set_log_level("debug")
self.assertEqual(tb.log_level(), "debug")
self.assertEqual(nsrc.log_level(), "debug")
self.assertEqual(nsnk.log_level(), "debug")
self.assertEqual(b.one_in_n.log_level(), "debug")
tb.set_log_level("alert")
self.assertEqual(tb.log_level(), "alert")
self.assertEqual(nsrc.log_level(), "alert")
self.assertEqual(nsnk.log_level(), "alert")
self.assertEqual(b.one_in_n.log_level(), "alert")
if __name__ == '__main__':
gr_unittest.run(test_logger, "test_logger.xml")
| gpl-3.0 |
mfherbst/spack | var/spack/repos/builtin.mock/packages/othervirtual/package.py | 5 | 1509 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Othervirtual(Package):
homepage = "http://somewhere.com"
url = "http://somewhere.com/stuff-1.0.tar.gz"
version('1.0', '67890abcdef1234567890abcdef12345')
provides('stuff')
def install(self, spec, prefix):
pass
| lgpl-2.1 |
dr4g0nsr/phoenix-kodi-addon | plugin.video.phstreams/resources/lib/sources/muchmoviesv2_mv_tv.py | 7 | 7118 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.libraries import cleantitle
from resources.lib.libraries import cloudflare
from resources.lib.libraries import client
class source:
def __init__(self):
self.base_link = 'http://123movies.to'
self.search_link = '/movie/search/%s'
def get_movie(self, imdb, title, year):
try:
query = self.search_link % urllib.quote(title)
query = urlparse.urljoin(self.base_link, query)
result = cloudflare.source(query)
title = cleantitle.movie(title)
years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
result = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h2'), re.compile('class *= *[\'|\"]jt-info[\'|\"]>(\d{4})<').findall(i)) for i in result]
result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
result = [i for i in result if any(x in i[2] for x in years)]
result = [(i[0], re.sub('\d{4}$', '', i[1]).strip()) for i in result]
result = [i[0] for i in result if title == cleantitle.movie(i[1])][0]
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
url = '%s (%s)' % (tvshowtitle, year)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]
query = self.search_link % urllib.quote(tvshowtitle)
query = urlparse.urljoin(self.base_link, query)
result = cloudflare.source(query)
tvshowtitle = cleantitle.tv(tvshowtitle)
season = '%01d' % int(season)
episode = '%01d' % int(episode)
years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
result = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h2'), re.compile('class *= *[\'|\"]jt-info[\'|\"]>(\d{4})<').findall(i)) for i in result]
result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
result = [(i[0], re.compile('(.+?) - Season (\d*)$').findall(i[1]), i[2]) for i in result]
result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
result = [i for i in result if season == i[2]]
result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
result = [i[0] for i in result if any(x in i[2] for x in years)][0]
result += '?S%02dE%02d' % (int(season), int(episode))
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
content = re.compile('(.+?)\?S\d*E\d*$').findall(url)
try: url, season, episode = re.compile('(.+?)\?S(\d*)E(\d*)$').findall(url)[0]
except: pass
url = urlparse.urljoin(self.base_link, url)
url = urlparse.urljoin(url, 'watching.html')
referer = url
result = cloudflare.source(url)
try: quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0]
except: quality = 'HD'
if '1080p' in quality: quality = '1080p'
else: quality = 'HD'
url = re.compile('var\s+url_playlist *= *"(.+?)"').findall(result)[0]
result = client.parseDOM(result, 'div', attrs = {'class': 'les-content'})
result = zip(client.parseDOM(result, 'a', ret='onclick'), client.parseDOM(result, 'a'))
result = [(i[0], re.compile('(\d+)').findall(i[1])) for i in result]
result = [(i[0], '%01d' % int(i[1][0])) for i in result if len(i[1]) > 0]
result = [(i[0], i[1]) for i in result]
result = [(re.compile('(\d+)').findall(i[0]), i[1]) for i in result]
result = [('%s/%s/%s' % (url, i[0][0], i[0][1]), i[1]) for i in result]
if len(content) == 0:
url = [i[0] for i in result]
else:
episode = '%01d' % int(episode)
url = [i[0] for i in result if episode == i[1]]
url = ['%s|User-Agent=%s&Referer=%s' % (i, urllib.quote_plus(client.agent()), urllib.quote_plus(referer)) for i in url]
for u in url: sources.append({'source': 'Muchmovies', 'quality': quality, 'provider': 'Muchmoviesv2', 'url': u})
return sources
except:
return sources
def resolve(self, url):
try:
url, headers = url.split('|')
idx = int(re.compile('/(\d+)').findall(url)[-1])
result = cloudflare.request(url)
url = client.parseDOM(result, 'item')
url = [i for i in url if not 'youtube.com' in i and not '>Intro<' in i][idx]
url = re.compile("file *= *[\'|\"](.+?)[\'|\"]").findall(url)
url = [i for i in url if not i.endswith('.srt')][0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
if 'google' in url:
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
else:
url = '%s|%s' % (url, headers)
return url
except:
return
| gpl-2.0 |
raybuhr/grab | test/spider_stat.py | 12 | 1676 | import six
from grab.spider import Spider, Task
from tempfile import mkstemp
import os
from six import StringIO
import mock
import sys
from test.util import TMP_DIR
from test.util import BaseGrabTestCase, build_spider
class BasicSpiderTestCase(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_stop_timer_invalid_input(self):
class TestSpider(Spider):
pass
bot = build_spider(TestSpider)
self.assertRaises(KeyError, bot.timer.stop, 'zzz')
def test_counters_and_collections(self):
class TestSpider(Spider):
def prepare(self):
self.stat.logging_period = 0
self.stat.inc('foo')
def task_page_valid(self, grab, task):
self.stat.inc('foo')
def task_page_fail(self, grab, task):
                1 / 0  # deliberately raise ZeroDivisionError so the task fails
bot = build_spider(TestSpider)
bot.setup_queue()
bot.add_task(Task('page_valid', url=self.server.get_url()))
bot.add_task(Task('page_fail', url=self.server.get_url()))
bot.run()
self.assertEqual(2, bot.stat.counters['foo'])
self.assertEqual(1, len(bot.stat.collections['fatal']))
def test_render_stats(self):
class TestSpider(Spider):
def prepare(self):
self.stat.logging_period = 0
self.stat.inc('foo')
def task_page(self, grab, task):
pass
bot = build_spider(TestSpider)
bot.setup_queue()
bot.add_task(Task('page', url=self.server.get_url()))
bot.run()
        # Smoke-test both call signatures; only absence of errors is checked.
        stats = bot.render_stats()
        stats = bot.render_stats(timing=True)
| mit |
bretlowery/snakr | lib/django/contrib/flatpages/views.py | 475 | 2777 | from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.shortcuts import get_current_site
from django.http import Http404, HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from django.template import loader
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_protect
DEFAULT_TEMPLATE = 'flatpages/default.html'
# This view is called from FlatpageFallbackMiddleware.process_response
# when a 404 is raised, which often means CsrfViewMiddleware.process_view
# has not been called even if CsrfViewMiddleware is installed. So we need
# to use @csrf_protect, in case the template needs {% csrf_token %}.
# However, we can't just wrap this view; if no matching flatpage exists,
# or a redirect is required for authentication, the 404 needs to be returned
# without any CSRF checks. Therefore, we only
# CSRF protect the internal implementation.
def flatpage(request, url):
"""
Public interface to the flat page view.
Models: `flatpages.flatpages`
Templates: Uses the template defined by the ``template_name`` field,
or :template:`flatpages/default.html` if template_name is not defined.
Context:
flatpage
`flatpages.flatpages` object
"""
if not url.startswith('/'):
url = '/' + url
site_id = get_current_site(request).id
try:
f = get_object_or_404(FlatPage,
url=url, sites=site_id)
except Http404:
if not url.endswith('/') and settings.APPEND_SLASH:
url += '/'
f = get_object_or_404(FlatPage,
url=url, sites=site_id)
return HttpResponsePermanentRedirect('%s/' % request.path)
else:
raise
return render_flatpage(request, f)
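# Illustrative sketch (not part of the original module): the view can also be
# wired up explicitly in a URLconf instead of relying on the fallback
# middleware; the URL pattern below is an assumption, not part of this file.
#
#     from django.conf.urls import url
#     from django.contrib.flatpages import views
#
#     urlpatterns = [
#         url(r'^pages(?P<url>.*)$', views.flatpage),
#     ]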
@csrf_protect
def render_flatpage(request, f):
"""
Internal interface to the flat page view.
"""
# If registration is required for accessing this page, and the user isn't
# logged in, redirect to the login page.
if f.registration_required and not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.path)
if f.template_name:
template = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
template = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
response = HttpResponse(template.render({'flatpage': f}, request))
return response
| bsd-3-clause |
newswangerd/ansible | test/units/module_utils/test_distro.py | 35 | 1512 |
# (c) 2018 Adrian Likins <[email protected]>
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# or
# Apache License v2.0 (see http://www.apache.org/licenses/LICENSE-2.0)
#
# Dual licensed so any test cases could potentially be included by the upstream project
# that module_utils/distro.py is from (https://github.com/nir0s/distro)
# Note that nir0s/distro has many more tests in it's test suite. The tests here are
# primarily for testing the vendoring.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils import distro
from ansible.module_utils.six import string_types
# Generic test case with minimal assertions about specific returned values.
class TestDistro():
# should run on any platform without errors, even if non-linux without any
# useful info to return
def test_info(self):
info = distro.info()
assert isinstance(info, dict), \
            'distro.info() returned %s (%s) which is not a dict' % (info, type(info))
def test_linux_distribution(self):
linux_dist = distro.linux_distribution()
assert isinstance(linux_dist, tuple), \
            'linux_distribution() returned %s (%s) which is not a tuple' % (linux_dist, type(linux_dist))
def test_id(self):
id = distro.id()
assert isinstance(id, string_types), 'distro.id() returned %s (%s) which is not a string' % (id, type(id))
| gpl-3.0 |
nozuono/calibre-webserver | src/calibre/ebooks/rtf2xml/group_styles.py | 24 | 8744 | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import sys, os, re
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
class GroupStyles:
"""
    Group consecutive paragraphs that share a style.
    Use the style name attached to each paragraph definition to decide
    whether adjacent paragraphs belong to the same run, and, when wrapping
    is enabled, enclose each run in a style-group element.
"""
def __init__(self,
in_file,
bug_handler,
copy = None,
run_level = 1,
wrap = 0,
):
"""
        Required:
            'in_file'
            'bug_handler'
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'run_level' -- how verbose error reporting should be
            'wrap' -- whether to wrap style runs in style-group tags
Returns:
nothing
"""
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
self.__run_level = run_level
self.__write_to = better_mktemp()
self.__wrap = wrap
def __initiate_values(self):
"""
Required:
Nothing
Return:
Nothing
Logic:
            The self.__end_list is a list of tokens that will force an open
            style group to end.
"""
self.__state = "default"
self.__left_indent = 0
self.__list_type = 'not-defined'
self.__pard_def = ""
self.__all_lists = []
self.__list_chunk = ''
self.__state_dict={
'default' : self.__default_func,
'in_pard' : self.__in_pard_func,
'after_pard' : self.__after_pard_func,
}
# section end
self.__end_list = [
# section end
'mi<mk<sect-close',
'mi<mk<sect-start',
# table begin
'mi<mk<tabl-start',
# field block begin
'mi<mk<fldbk-end_',
'mi<mk<fldbkstart',
# cell end
'mi<mk<close_cell',
# item end
'mi<tg<item_end__',
# footnote end
'mi<mk<foot___clo',
'mi<mk<footnt-ope',
# heading end
'mi<mk<header-beg',
'mi<mk<header-end',
'mi<mk<head___clo',
# lists
'mi<tg<item_end__',
'mi<tg<item_end__',
'mi<mk<list_start'
# body close
# don't use
# 'mi<mk<body-close',
# 'mi<mk<par-in-fld',
# 'cw<tb<cell______',
# 'cw<tb<row-def___',
# 'cw<tb<row_______',
# 'mi<mk<sec-fd-beg',
]
self.__name_regex = re.compile(r'<name>')
self.__found_appt = 0
self.__line_num = 0
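    # Illustrative sketch (not part of the original module): the intermediate
    # token stream this class rewrites. Adjacent paragraph definitions that
    # carry the same style name, e.g.
    #
    #     mi<mk<style-name<Normal
    #     mi<tg<open-att__<paragraph-definition<name>Normal
    #     ...
    #     mi<tg<close_____<paragraph-definition
    #     mi<mk<style-name<Normal
    #     mi<tg<open-att__<paragraph-definition<name>Normal
    #
    # are merged into one run, and when self.__wrap is set each run is
    # enclosed in style-group open and close tags.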
def __in_pard_func(self, line):
"""
Required:
line -- the line of current text.
Return:
Nothing
Logic:
You are in a list, but in the middle of a paragraph definition.
Don't do anything until you find the end of the paragraph definition.
"""
if self.__token_info == 'mi<tg<close_____' \
and line[17:-1] == 'paragraph-definition':
self.__state = 'after_pard'
else:
self.__write_obj.write(line)
def __after_pard_func(self, line):
"""
Required:
line -- the line of current text.
Return:
Nothing
        Logic:
            Buffer lines after the paragraph definition has closed, until a
            new paragraph definition or a group-ending token is found.
"""
if self.__token_info == 'mi<tg<open-att__' \
and line[17:37] == 'paragraph-definition':
# found paragraph definition
self.__pard_after_par_def_func(line)
elif self.__token_info == 'mi<tg<close_____' \
and line[17:-1] == 'paragraph-definition':
sys.stderr.write('Wrong flag in __after_pard_func\n')
if self.__run_level > 2:
msg = 'wrong flag'
raise self.__bug_handler, msg
elif self.__token_info in self.__end_list:
self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
self.__write_end_wrap()
self.__write_obj.write(self.__list_chunk)
self.__list_chunk = ''
self.__state = 'default'
self.__write_obj.write(line)
else:
self.__list_chunk += line
def __close_pard_(self, line):
self.__write_obj.write(self.__list_chunk)
self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
self.__write_end_wrap()
self.__list_chunk = ''
self.__state = 'default'
def __write_start_wrap(self, name):
if self.__wrap:
self.__write_obj.write('mi<mk<style-grp_<%s\n' % name)
self.__write_obj.write('mi<tg<open-att__<style-group<name>%s\n' % name)
self.__write_obj.write('mi<mk<style_grp_<%s\n' % name)
def __write_end_wrap(self):
if self.__wrap:
self.__write_obj.write('mi<mk<style_gend\n' )
self.__write_obj.write('mi<tg<close_____<style-group\n')
self.__write_obj.write('mi<mk<stylegend_\n' )
def __pard_after_par_def_func(self, line):
"""
Required:
line -- the line of current text.
        Return:
            Nothing
        Logic:
            If the new paragraph definition carries the same style name as
            the previous one, keep the current style group open; otherwise
            close the group and start a new one.
"""
if self.__last_style_name == self.__style_name:
# just keep going
if self.__wrap:
self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
self.__write_obj.write(self.__list_chunk)
self.__list_chunk = ''
self.__state = 'in_pard'
if self.__wrap:
self.__write_obj.write(line)
else:
# different name for the paragraph definition
self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
self.__write_end_wrap()
self.__write_obj.write(self.__list_chunk)
self.__write_start_wrap(self.__style_name)
self.__write_obj.write(line)
self.__state = 'in_pard'
self.__last_style_name = self.__style_name
self.__list_chunk = ''
def __default_func(self, line):
"""
Required:
self, line
Returns:
Nothing
        Logic:
            Look for the start of a paragraph definition. If one is found,
            open a style group for its style name and change the state to
            in_pard.
"""
if self.__token_info == 'mi<tg<open-att__' \
and line[17:37] == 'paragraph-definition':
self.__state = 'in_pard'
self.__last_style_name = self.__style_name
self.__write_start_wrap(self.__last_style_name)
self.__write_obj.write(line)
else:
self.__write_obj.write(line)
def __get_style_name(self, line):
if self.__token_info == 'mi<mk<style-name':
self.__style_name = line[17:-1]
def group_styles(self):
"""
Required:
nothing
Returns:
original file will be changed
Logic:
"""
self.__initiate_values()
read_obj = open(self.__file, 'r')
self.__write_obj = open(self.__write_to, 'w')
line_to_read = 1
while line_to_read:
line_to_read = read_obj.readline()
line = line_to_read
self.__token_info = line[:16]
self.__get_style_name(line)
action = self.__state_dict.get(self.__state)
action(line)
read_obj.close()
self.__write_obj.close()
copy_obj = copy.Copy(bug_handler = self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "group_styles.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to)
| gpl-3.0 |
gaddman/ansible | lib/ansible/modules/cloud/cloudstack/cs_securitygroup.py | 78 | 5313 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_securitygroup
short_description: Manages security groups on Apache CloudStack based clouds.
description:
- Create and remove security groups.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the security group.
required: true
description:
description:
- Description of the security group.
state:
description:
- State of the security group.
default: present
choices: [ present, absent ]
domain:
description:
- Domain the security group is related to.
account:
description:
- Account the security group is related to.
project:
description:
- Name of the project the security group to be created in.
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: create a security group
local_action:
module: cs_securitygroup
name: default
description: default security group
- name: remove a security group
local_action:
module: cs_securitygroup
name: default
state: absent
'''
RETURN = '''
---
id:
description: UUID of the security group.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of security group.
returned: success
type: string
sample: app
description:
description: Description of security group.
returned: success
type: string
sample: application security group
tags:
description: List of resource tags associated with the security group.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
project:
description: Name of project the security group is related to.
returned: success
type: string
sample: Production
domain:
description: Domain the security group is related to.
returned: success
type: string
sample: example domain
account:
description: Account the security group is related to.
returned: success
type: string
sample: example account
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together
class AnsibleCloudStackSecurityGroup(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackSecurityGroup, self).__init__(module)
self.security_group = None
def get_security_group(self):
if not self.security_group:
args = {
'projectid': self.get_project(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'securitygroupname': self.module.params.get('name'),
}
sgs = self.query_api('listSecurityGroups', **args)
if sgs:
self.security_group = sgs['securitygroup'][0]
return self.security_group
def create_security_group(self):
security_group = self.get_security_group()
if not security_group:
self.result['changed'] = True
args = {
'name': self.module.params.get('name'),
'projectid': self.get_project(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'description': self.module.params.get('description'),
}
if not self.module.check_mode:
res = self.query_api('createSecurityGroup', **args)
security_group = res['securitygroup']
return security_group
def remove_security_group(self):
security_group = self.get_security_group()
if security_group:
self.result['changed'] = True
args = {
'name': self.module.params.get('name'),
'projectid': self.get_project(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
}
if not self.module.check_mode:
self.query_api('deleteSecurityGroup', **args)
return security_group
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
description=dict(),
state=dict(choices=['present', 'absent'], default='present'),
project=dict(),
account=dict(),
domain=dict(),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_sg = AnsibleCloudStackSecurityGroup(module)
state = module.params.get('state')
if state in ['absent']:
sg = acs_sg.remove_security_group()
else:
sg = acs_sg.create_security_group()
result = acs_sg.get_result(sg)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
ThiagoGarciaAlves/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/patcomp.py | 304 | 7091 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Pattern compiler.
The grammer is taken from PatternGrammar.txt.
The compiler compiles a pattern to a pytree.*Pattern instance.
"""
__author__ = "Guido van Rossum <[email protected]>"
# Python imports
import os
import StringIO
# Fairly local imports
from .pgen2 import driver, literals, token, tokenize, parse, grammar
# Really local imports
from . import pytree
from . import pygram
# The pattern grammar file
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
"PatternGrammar.txt")
class PatternSyntaxError(Exception):
pass
def tokenize_wrapper(input):
"""Tokenizes a string suppressing significant whitespace."""
skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if type not in skip:
yield quintuple
class PatternCompiler(object):
def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
"""Initializer.
Takes an optional alternative filename for the pattern grammar.
"""
self.grammar = driver.load_grammar(grammar_file)
self.syms = pygram.Symbols(self.grammar)
self.pygrammar = pygram.python_grammar
self.pysyms = pygram.python_symbols
self.driver = driver.Driver(self.grammar, convert=pattern_convert)
def compile_pattern(self, input, debug=False, with_tree=False):
"""Compiles a pattern string to a nested pytree.*Pattern object."""
tokens = tokenize_wrapper(input)
try:
root = self.driver.parse_tokens(tokens, debug=debug)
except parse.ParseError as e:
raise PatternSyntaxError(str(e))
if with_tree:
return self.compile_node(root), root
else:
return self.compile_node(root)
def compile_node(self, node):
"""Compiles a node, recursively.
This is one big switch on the node type.
"""
# XXX Optimize certain Wildcard-containing-Wildcard patterns
# that can be merged
if node.type == self.syms.Matcher:
node = node.children[0] # Avoid unneeded recursion
if node.type == self.syms.Alternatives:
# Skip the odd children since they are just '|' tokens
alts = [self.compile_node(ch) for ch in node.children[::2]]
if len(alts) == 1:
return alts[0]
p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
return p.optimize()
if node.type == self.syms.Alternative:
units = [self.compile_node(ch) for ch in node.children]
if len(units) == 1:
return units[0]
p = pytree.WildcardPattern([units], min=1, max=1)
return p.optimize()
if node.type == self.syms.NegatedUnit:
pattern = self.compile_basic(node.children[1:])
p = pytree.NegatedPattern(pattern)
return p.optimize()
assert node.type == self.syms.Unit
name = None
nodes = node.children
if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
name = nodes[0].value
nodes = nodes[2:]
repeat = None
if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
repeat = nodes[-1]
nodes = nodes[:-1]
# Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
pattern = self.compile_basic(nodes, repeat)
if repeat is not None:
assert repeat.type == self.syms.Repeater
children = repeat.children
child = children[0]
if child.type == token.STAR:
min = 0
max = pytree.HUGE
elif child.type == token.PLUS:
min = 1
max = pytree.HUGE
elif child.type == token.LBRACE:
assert children[-1].type == token.RBRACE
assert len(children) in (3, 5)
min = max = self.get_int(children[1])
if len(children) == 5:
max = self.get_int(children[3])
else:
assert False
if min != 1 or max != 1:
pattern = pattern.optimize()
pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
if name is not None:
pattern.name = name
return pattern.optimize()
def compile_basic(self, nodes, repeat=None):
# Compile STRING | NAME [Details] | (...) | [...]
assert len(nodes) >= 1
node = nodes[0]
if node.type == token.STRING:
value = unicode(literals.evalString(node.value))
return pytree.LeafPattern(_type_of_literal(value), value)
elif node.type == token.NAME:
value = node.value
if value.isupper():
if value not in TOKEN_MAP:
raise PatternSyntaxError("Invalid token: %r" % value)
if nodes[1:]:
raise PatternSyntaxError("Can't have details for token")
return pytree.LeafPattern(TOKEN_MAP[value])
else:
if value == "any":
type = None
elif not value.startswith("_"):
type = getattr(self.pysyms, value, None)
if type is None:
raise PatternSyntaxError("Invalid symbol: %r" % value)
if nodes[1:]: # Details present
content = [self.compile_node(nodes[1].children[1])]
else:
content = None
return pytree.NodePattern(type, content)
elif node.value == "(":
return self.compile_node(nodes[1])
elif node.value == "[":
assert repeat is None
subpattern = self.compile_node(nodes[1])
return pytree.WildcardPattern([[subpattern]], min=0, max=1)
assert False, node
def get_int(self, node):
assert node.type == token.NUMBER
return int(node.value)
# Map named tokens to the type value for a LeafPattern
TOKEN_MAP = {"NAME": token.NAME,
"STRING": token.STRING,
"NUMBER": token.NUMBER,
"TOKEN": None}
def _type_of_literal(value):
if value[0].isalpha():
return token.NAME
elif value in grammar.opmap:
return grammar.opmap[value]
else:
return None
def pattern_convert(grammar, raw_node_info):
"""Converts raw node information to a Node or Leaf instance."""
type, value, context, children = raw_node_info
if children or type in grammar.number2symbol:
return pytree.Node(type, children, context=context)
else:
return pytree.Leaf(type, value, context=context)
def compile_pattern(pattern):
return PatternCompiler().compile_pattern(pattern)
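# Usage sketch (the pattern syntax comes from PatternGrammar.txt; `node` is
# a lib2to3 pytree node produced elsewhere):
#
#   pat = compile_pattern("power< 'print' trailer< '(' args=any ')' > >")
#   results = {}
#   if pat.match(node, results):
#       handle(results["args"])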
| apache-2.0 |
cz-maria/Pawel_the_Mage | wrogowie.py | 1 | 3434 | import pygame
import stale
class Enemy(pygame.sprite.Sprite):
walking_frames_l = []
walking_frames_r = []
def __init__(self):
super().__init__()
image = pygame.image.load("knight.png").convert()
image.set_colorkey(stale.WHITE)
self.walking_frames_l.append(image)
image = pygame.transform.flip(image, True, False)
self.walking_frames_r.append(image)
image = pygame.image.load("knight2.png").convert()
image.set_colorkey(stale.WHITE)
self.walking_frames_l.append(image)
image = pygame.transform.flip(image, True, False)
self.walking_frames_r.append(image)
        self.image = self.walking_frames_l[0]
self.rect = self.image.get_rect()
        # Knight's horizontal speed
self.change_x = -1
self.change_y = 0
# List of sprites we can bump against
self.level = None
def update(self):
""" Ruch rycerza """
# Gravity
self.calc_grav()
# Move left/right
self.rect.x += self.change_x
pos = self.rect.x
if self.change_x < 0:
frame = (pos // 20) % len(self.walking_frames_l)
self.image = self.walking_frames_l[frame]
else:
frame = (pos // 20) % len(self.walking_frames_r)
self.image = self.walking_frames_r[frame]
# See if we hit anything
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# If we are moving right,
# set our right side to the left side of the item we hit
if self.change_x > 0:
self.rect.right = block.rect.left
self.change_x = - self.change_x
elif self.change_x < 0:
# Otherwise if we are moving left, do the opposite.
self.rect.left = block.rect.right
self.change_x = -self.change_x
self.direction = "R"
if self.rect.x > stale.SCREEN_WIDTH - self.rect.width or self.rect.x < 0:
self.change_x = - self.change_x
# Move up/down
self.rect.y += self.change_y
# Check and see if we hit anything
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# Reset our position based on the top/bottom of the object.
if self.change_y > 0:
self.rect.bottom = block.rect.top
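                # Patrol behaviour: turn around before walking off the
                # platform edge (the 85 px margin is presumably a tuned value).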
if self.change_x > 0 and self.rect.x > block.rect.right - 85:
self.change_x = -1
elif self.change_x < 0 and self.rect.x < block.rect.left:
self.change_x = 1
elif self.change_y < 0:
self.rect.top = block.rect.bottom
# Stop our vertical movement
self.change_y = 0
def calc_grav(self):
""" Calculate effect of gravity. """
if self.change_y == 0:
self.change_y = 1
else:
self.change_y += .35
# See if we are on the ground.
if self.rect.y >= stale.SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:
self.change_y = 0
self.rect.y = stale.SCREEN_HEIGHT - self.rect.height
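# Usage sketch (hypothetical game-loop wiring; `level` must expose a
# `platform_list` sprite group, as update() assumes):
#
#   knight = Enemy()
#   knight.level = current_level
#   enemies = pygame.sprite.Group(knight)
#   ...each frame: enemies.update()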
class Boss(Enemy):
def __init__(self, player):
super().__init__()
| mit |
holygits/incubator-airflow | tests/contrib/hooks/test_zendesk_hook.py | 37 | 3566 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import mock
from airflow.hooks.zendesk_hook import ZendeskHook
from zdesk import RateLimitError
class TestZendeskHook(unittest.TestCase):
@mock.patch("airflow.hooks.zendesk_hook.time")
def test_sleeps_for_correct_interval(self, mocked_time):
sleep_time = 10
# To break out of the otherwise infinite tries
mocked_time.sleep = mock.Mock(side_effect=ValueError, return_value=3)
conn_mock = mock.Mock()
mock_response = mock.Mock()
mock_response.headers.get.return_value = sleep_time
conn_mock.call = mock.Mock(
side_effect=RateLimitError(msg="some message", code="some code",
response=mock_response))
zendesk_hook = ZendeskHook("conn_id")
zendesk_hook.get_conn = mock.Mock(return_value=conn_mock)
with self.assertRaises(ValueError):
zendesk_hook.call("some_path", get_all_pages=False)
mocked_time.sleep.assert_called_with(sleep_time)
@mock.patch("airflow.hooks.zendesk_hook.Zendesk")
def test_returns_single_page_if_get_all_pages_false(self, _):
zendesk_hook = ZendeskHook("conn_id")
mock_connection = mock.Mock()
mock_connection.host = "some_host"
zendesk_hook.get_connection = mock.Mock(return_value=mock_connection)
zendesk_hook.get_conn()
mock_conn = mock.Mock()
mock_call = mock.Mock(
return_value={'next_page': 'https://some_host/something', 'path':
[]})
mock_conn.call = mock_call
zendesk_hook.get_conn = mock.Mock(return_value=mock_conn)
zendesk_hook.call("path", get_all_pages=False)
mock_call.assert_called_once_with("path", None)
@mock.patch("airflow.hooks.zendesk_hook.Zendesk")
def test_returns_multiple_pages_if_get_all_pages_true(self, _):
zendesk_hook = ZendeskHook("conn_id")
mock_connection = mock.Mock()
mock_connection.host = "some_host"
zendesk_hook.get_connection = mock.Mock(return_value=mock_connection)
zendesk_hook.get_conn()
mock_conn = mock.Mock()
mock_call = mock.Mock(
return_value={'next_page': 'https://some_host/something', 'path': []})
mock_conn.call = mock_call
zendesk_hook.get_conn = mock.Mock(return_value=mock_conn)
zendesk_hook.call("path", get_all_pages=True)
assert mock_call.call_count == 2
@mock.patch("airflow.hooks.zendesk_hook.Zendesk")
def test_zdesk_is_inited_correctly(self, mock_zendesk):
conn_mock = mock.Mock()
conn_mock.host = "conn_host"
conn_mock.login = "conn_login"
conn_mock.password = "conn_pass"
zendesk_hook = ZendeskHook("conn_id")
zendesk_hook.get_connection = mock.Mock(return_value=conn_mock)
zendesk_hook.get_conn()
mock_zendesk.assert_called_with('https://conn_host', 'conn_login',
'conn_pass', True)
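# Minimal sketch for running this module directly (the project's test
# runner normally discovers it):
if __name__ == '__main__':
    unittest.main()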
| apache-2.0 |
AmeristralianDollar/AmeristralianDollar | share/qt/extract_strings_qt.py | 2945 | 1844 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
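# Example (a sketch): for the two-line input
#   msgid "Hello"
#   msgstr ""
# parse_po returns [(['"Hello"'], ['""'])].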
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
pde/tosback2 | web-frontend/beautifulsoup4/bs4/builder/_htmlparser.py | 9 | 7507 | """Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
from HTMLParser import HTMLParser
import sys
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = (
major > 3
or (major == 3 and minor > 2)
or (major == 3 and minor == 2 and release >= 3))
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
self.soup.handle_starttag(name, None, None, dict(attrs))
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed.
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
else:
real_name = int(name)
try:
data = unichr(real_name)
except (ValueError, OverflowError), e:
data = u"\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
features = [HTML, STRICT, HTMLPARSER]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT:
kwargs['strict'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, unicode):
return markup, None, None, False
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
return (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
parser.feed(markup)
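# Usage sketch (BeautifulSoup selects this builder by its feature name):
#
#   from bs4 import BeautifulSoup
#   soup = BeautifulSoup(markup, "html.parser")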
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
| gpl-2.0 |
I-prefer-the-front-end/I-prefer-the-front-end | iptfe/blog/views.py | 1 | 1283 | from django.shortcuts import render
from django.http import HttpResponse
from blog.models import Post, Comment
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
def archive(request):
all_posts = Post.objects.all();
context = {}
context['posts'] = all_posts
context['count'] = all_posts.count()
context['page'] = 'archive'
return render(request, 'blog/archive.html', context)
def post(request, slug):
if slug:
post = Post.objects.filter(slug = slug).first()
else:
post = Post.objects.latest();
context = {}
context['title'] = post.title
context['post_date'] = post.post_date
context['content'] = post.content
context['comments'] = post.comment_set.all()
return render(request, 'blog/post.html', context)
def comment(request):
comment = request.POST.get("comment", "")
author = request.POST.get("author", "")
email = request.POST.get("email", "")
post_title = request.POST.get("post", "")
post = Post.objects.filter(title = post_title).first()
user_comment = Comment(author=author, comment=comment, email=email, post=post)
user_comment.save()
return HttpResponseRedirect(reverse('blog:post', args=(post.slug,)))
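# Sketch of the urlconf these views assume (the 'blog' namespace and the
# slug pattern are inferred from the reverse() call above, not confirmed):
#
#   url(r'^archive/$', views.archive, name='archive'),
#   url(r'^post/(?P<slug>[-\w]+)/$', views.post, name='post'),
#   url(r'^comment/$', views.comment, name='comment'),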
| mit |
jeromeetienne/neoip | libxml2-2.6.27/python/tests/ctxterror.py | 87 | 1318 | #!/usr/bin/python -u
#
# This test exercises the redirection of error messages to a
# function defined in Python.
#
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
expect="""--> (3) xmlns: URI foo is not absolute
--> (4) Opening and ending tag mismatch: x line 0 and y
"""
err=""
def callback(arg,msg,severity,reserved):
global err
err = err + "%s (%d) %s" % (arg,severity,msg)
s = """<x xmlns="foo"></y>"""
parserCtxt = libxml2.createPushParser(None,"",0,"test.xml")
parserCtxt.setErrorHandler(callback, "-->")
if parserCtxt.getErrorHandler() != (callback,"-->"):
print "getErrorHandler failed"
sys.exit(1)
parserCtxt.parseChunk(s,len(s),1)
doc = parserCtxt.doc()
doc.freeDoc()
parserCtxt = None
if err != expect:
print "error"
print "received %s" %(err)
print "expected %s" %(expect)
sys.exit(1)
i = 10000
while i > 0:
parserCtxt = libxml2.createPushParser(None,"",0,"test.xml")
parserCtxt.setErrorHandler(callback, "-->")
parserCtxt.parseChunk(s,len(s),1)
doc = parserCtxt.doc()
doc.freeDoc()
parserCtxt = None
err = ""
i = i - 1
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
| gpl-3.0 |
l0b0/tilde | .config/calibre/global.py | 1 | 3641 | # calibre wide preferences
### Begin group: DEFAULT
# database path
# Path to the database in which books are stored
database_path = '/home/username/library1.db'
# filename pattern
# Pattern to guess metadata from filenames
filename_pattern = u'(?P<title>.+) - (?P<author>[^_]+)'
# isbndb com key
# Access key for isbndb.com
isbndb_com_key = ''
# network timeout
# Default timeout for network operations (seconds)
network_timeout = 5
# library path
# Path to directory in which your library of books is stored
library_path = u'.calibre'
# language
# The language in which to display the user interface
language = 'en'
# output format
# The default output format for ebook conversions.
output_format = 'epub'
# input format order
# Ordered list of formats to prefer for input.
input_format_order = cPickle.loads('\x80\x02]q\x01(U\x04EPUBq\x02U\x04AZW3q\x03U\x04MOBIq\x04U\x03LITq\x05U\x03PRCq\x06U\x03FB2q\x07U\x04HTMLq\x08U\x03HTMq\tU\x04XHTMq\nU\x05SHTMLq\x0bU\x05XHTMLq\x0cU\x03ZIPq\rU\x03ODTq\x0eU\x03RTFq\x0fU\x03PDFq\x10U\x03TXTq\x11e.')
# read file metadata
# Read metadata from files
read_file_metadata = True
# worker process priority
# The priority of worker processes. A higher priority means they run faster and consume more resources. Most tasks like conversion/news download/adding books/etc. are affected by this setting.
worker_process_priority = 'normal'
# swap author names
# Swap author first and last names when reading metadata
swap_author_names = False
# add formats to existing
# Add new formats to existing book records
add_formats_to_existing = False
# check for dupes on ctl
# Check for duplicates when copying to another library
check_for_dupes_on_ctl = False
# installation uuid
# Installation UUID
installation_uuid = '33ac5ee3-f090-4bf9-a6e0-322a803d5c7f'
# new book tags
# Tags to apply to books added to the library
new_book_tags = cPickle.loads('\x80\x02]q\x01.')
# mark new books
# Mark newly added books. The mark is a temporary mark that is automatically removed when calibre is restarted.
mark_new_books = False
# saved searches
# List of named saved searches
saved_searches = cPickle.loads('\x80\x02}q\x01.')
# user categories
# User-created tag browser categories
user_categories = cPickle.loads('\x80\x02}q\x01.')
# manage device metadata
# How and when calibre updates metadata on the device.
manage_device_metadata = 'manual'
# limit search columns
# When searching for text without using lookup prefixes, as for example, Red instead of title:Red, limit the columns searched to those named below.
limit_search_columns = False
# limit search columns to
# Choose columns to be searched when not using prefixes, as for example, when searching for Red instead of title:Red. Enter a list of search/lookup names separated by commas. Only takes effect if you set the option to limit search columns above.
limit_search_columns_to = cPickle.loads('\x80\x02]q\x01(U\x05titleq\x02U\x07authorsq\x03U\x04tagsq\x04U\x06seriesq\x05U\tpublisherq\x06e.')
# use primary find in search
# Characters typed in the search box will match their accented versions, based on the language you have chosen for the calibre interface. For example, in English, searching for n will match both ñ and n, but if your language is Spanish it will only match n. Note that this is much slower than a simple search on very large libraries. Also, this option will have no effect if you turn on case-sensitive searching
use_primary_find_in_search = True
# case sensitive
# Make searches case-sensitive
case_sensitive = False
# migrated
# For Internal use. Don't modify.
migrated = False
| gpl-3.0 |
philippze/django-cms | menus/templatetags/menu_tags.py | 35 | 14631 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from classytags.arguments import IntegerArgument, Argument, StringArgument
from classytags.core import Options
from classytags.helpers import InclusionTag
from cms.utils.i18n import force_language, get_language_objects
from django import template
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import get_language, ugettext
from menus.menu_pool import menu_pool
from menus.utils import DefaultLanguageChanger
register = template.Library()
class NOT_PROVIDED:
pass
def cut_after(node, levels, removed):
"""
given a tree of nodes cuts after N levels
"""
if levels == 0:
removed.extend(node.children)
node.children = []
else:
removed_local = []
for child in node.children:
if child.visible:
cut_after(child, levels - 1, removed)
else:
removed_local.append(child)
for removed_child in removed_local:
node.children.remove(removed_child)
removed.extend(removed_local)
def remove(node, removed):
removed.append(node)
if node.parent:
if node in node.parent.children:
node.parent.children.remove(node)
def cut_levels(nodes, from_level, to_level, extra_inactive, extra_active):
"""
cutting nodes away from menus
"""
final = []
removed = []
selected = None
for node in nodes:
if not hasattr(node, 'level'):
# remove and ignore nodes that don't have level information
remove(node, removed)
continue
if node.level == from_level:
# turn nodes that are on from_level into root nodes
final.append(node)
node.parent = None
if not node.ancestor and not node.selected and not node.descendant:
# cut inactive nodes to extra_inactive, but not of descendants of
# the selected node
cut_after(node, extra_inactive, removed)
if node.level > to_level and node.parent:
# remove nodes that are too deep, but not nodes that are on
# from_level (local root nodes)
remove(node, removed)
if node.selected:
selected = node
if not node.visible:
remove(node, removed)
if selected:
cut_after(selected, extra_active, removed)
if removed:
for node in removed:
if node in final:
final.remove(node)
return final
def flatten(nodes):
flat = []
for node in nodes:
flat.append(node)
flat.extend(flatten(node.children))
return flat
class ShowMenu(InclusionTag):
"""
render a nested list of all children of the pages
- from_level: starting level
- to_level: max level
- extra_inactive: how many levels should be rendered of the not active tree?
- extra_active: how deep should the children of the active node be rendered?
- namespace: the namespace of the menu. if empty will use all namespaces
- root_id: the id of the root node
- template: template used to render the menu
"""
name = 'show_menu'
template = 'menu/dummy.html'
options = Options(
IntegerArgument('from_level', default=0, required=False),
IntegerArgument('to_level', default=100, required=False),
IntegerArgument('extra_inactive', default=0, required=False),
IntegerArgument('extra_active', default=1000, required=False),
StringArgument('template', default='menu/menu.html', required=False),
StringArgument('namespace', default=None, required=False),
StringArgument('root_id', default=None, required=False),
Argument('next_page', default=None, required=False),
)
def get_context(self, context, from_level, to_level, extra_inactive,
extra_active, template, namespace, root_id, next_page):
try:
# If there's an exception (500), default context_processors may not be called.
request = context['request']
except KeyError:
return {'template': 'menu/empty.html'}
if next_page:
children = next_page.children
else:
# new menu... get all the data so we can save a lot of queries
nodes = menu_pool.get_nodes(request, namespace, root_id)
if root_id: # find the root id and cut the nodes
id_nodes = menu_pool.get_nodes_by_attribute(nodes, "reverse_id", root_id)
if id_nodes:
node = id_nodes[0]
nodes = node.children
for remove_parent in nodes:
remove_parent.parent = None
from_level += node.level + 1
to_level += node.level + 1
nodes = flatten(nodes)
else:
nodes = []
children = cut_levels(nodes, from_level, to_level, extra_inactive, extra_active)
children = menu_pool.apply_modifiers(children, request, namespace, root_id, post_cut=True)
try:
context['children'] = children
context['template'] = template
context['from_level'] = from_level
context['to_level'] = to_level
context['extra_inactive'] = extra_inactive
context['extra_active'] = extra_active
context['namespace'] = namespace
except:
context = {"template": template}
return context
register.tag(ShowMenu)
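# Template usage sketch:
#
#   {% load menu_tags %}
#   {% show_menu 0 100 100 100 "menu/menu.html" %}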
class ShowMenuBelowId(ShowMenu):
name = 'show_menu_below_id'
options = Options(
Argument('root_id', default=None, required=False),
IntegerArgument('from_level', default=0, required=False),
IntegerArgument('to_level', default=100, required=False),
IntegerArgument('extra_inactive', default=0, required=False),
IntegerArgument('extra_active', default=1000, required=False),
Argument('template', default='menu/menu.html', required=False),
Argument('namespace', default=None, required=False),
Argument('next_page', default=None, required=False),
)
register.tag(ShowMenuBelowId)
class ShowSubMenu(InclusionTag):
"""
show the sub menu of the current nav-node.
- levels: how many levels deep
- root_level: the level to start the menu at
- nephews: the level of descendants of siblings (nephews) to show
- template: template used to render the navigation
"""
name = 'show_sub_menu'
template = 'menu/dummy.html'
options = Options(
IntegerArgument('levels', default=100, required=False),
Argument('root_level', default=None, required=False),
IntegerArgument('nephews', default=100, required=False),
Argument('template', default='menu/sub_menu.html', required=False),
)
def get_context(self, context, levels, root_level, nephews, template):
        # Django 1.4 doesn't accept 'None' as a tag value and resolves it
        # to '', so we need to force it back to None here.
if not root_level and root_level != 0:
root_level = None
try:
# If there's an exception (500), default context_processors may not be called.
request = context['request']
except KeyError:
return {'template': 'menu/empty.html'}
nodes = menu_pool.get_nodes(request)
children = []
# adjust root_level so we cut before the specified level, not after
include_root = False
if root_level is not None and root_level > 0:
root_level -= 1
elif root_level is not None and root_level == 0:
include_root = True
for node in nodes:
if root_level is None:
if node.selected:
# if no root_level specified, set it to the selected nodes level
root_level = node.level
# is this the ancestor of current selected node at the root level?
is_root_ancestor = (node.ancestor and node.level == root_level)
# is a node selected on the root_level specified
root_selected = (node.selected and node.level == root_level)
if is_root_ancestor or root_selected:
cut_after(node, levels, [])
children = node.children
for child in children:
if child.sibling:
cut_after(child, nephews, [])
# if root_level was 0 we need to give the menu the entire tree
# not just the children
if include_root:
children = menu_pool.apply_modifiers([node], request, post_cut=True)
else:
children = menu_pool.apply_modifiers(children, request, post_cut=True)
context['children'] = children
context['template'] = template
context['from_level'] = 0
context['to_level'] = 0
context['extra_inactive'] = 0
context['extra_active'] = 0
return context
register.tag(ShowSubMenu)
class ShowBreadcrumb(InclusionTag):
"""
Shows the breadcrumb from the node that has the same url as the current request
- start level: after which level should the breadcrumb start? 0=home
- template: template used to render the breadcrumb
"""
name = 'show_breadcrumb'
template = 'menu/dummy.html'
options = Options(
Argument('start_level', default=0, required=False),
Argument('template', default='menu/breadcrumb.html', required=False),
Argument('only_visible', default=True, required=False),
)
def get_context(self, context, start_level, template, only_visible):
try:
# If there's an exception (500), default context_processors may not be called.
request = context['request']
except KeyError:
return {'template': 'cms/content.html'}
if not (isinstance(start_level, int) or start_level.isdigit()):
only_visible = template
template = start_level
start_level = 0
try:
only_visible = bool(int(only_visible))
except:
only_visible = bool(only_visible)
ancestors = []
nodes = menu_pool.get_nodes(request, breadcrumb=True)
        # Find home
        root_url = unquote(reverse("pages-root"))
        home = next((node for node in nodes if node.get_absolute_url() == root_url), None)
        # Find selected
        selected = next((node for node in nodes if node.selected), None)
if selected and selected != home:
node = selected
while node:
if node.visible or not only_visible:
ancestors.append(node)
node = node.parent
if not ancestors or (ancestors and ancestors[-1] != home) and home:
ancestors.append(home)
ancestors.reverse()
if len(ancestors) >= start_level:
ancestors = ancestors[start_level:]
else:
ancestors = []
context['ancestors'] = ancestors
context['template'] = template
return context
register.tag(ShowBreadcrumb)
def _raw_language_marker(language, lang_code):
return language
def _native_language_marker(language, lang_code):
with force_language(lang_code):
return force_text(ugettext(language))
def _current_language_marker(language, lang_code):
return force_text(ugettext(language))
def _short_language_marker(language, lang_code):
return lang_code
MARKERS = {
'raw': _raw_language_marker,
'native': _native_language_marker,
'current': _current_language_marker,
'short': _short_language_marker,
}
class LanguageChooser(InclusionTag):
"""
Displays a language chooser
- template: template used to render the language chooser
"""
name = 'language_chooser'
template = 'menu/dummy.html'
options = Options(
Argument('template', default=NOT_PROVIDED, required=False),
Argument('i18n_mode', default='raw', required=False),
)
def get_context(self, context, template, i18n_mode):
if template in MARKERS:
_tmp = template
if i18n_mode not in MARKERS:
template = i18n_mode
else:
template = NOT_PROVIDED
i18n_mode = _tmp
if template is NOT_PROVIDED:
template = "menu/language_chooser.html"
        if i18n_mode not in MARKERS:
i18n_mode = 'raw'
if 'request' not in context:
# If there's an exception (500), default context_processors may not be called.
return {'template': 'cms/content.html'}
marker = MARKERS[i18n_mode]
current_lang = get_language()
site = Site.objects.get_current()
languages = []
for lang in get_language_objects(site.pk):
if lang.get('public', True):
languages.append((lang['code'], marker(lang['name'], lang['code'])))
context['languages'] = languages
context['current_language'] = current_lang
context['template'] = template
return context
register.tag(LanguageChooser)
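# Template usage sketch:
#
#   {% language_chooser "menu/language_chooser.html" %}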
class PageLanguageUrl(InclusionTag):
"""
    Displays the URL of the current page in the given language.
    If there is no page, you can set a language_changer function with
    set_language_changer() in menus/utils.py. This is needed if you have
    slugs in more than one language.
"""
name = 'page_language_url'
template = 'cms/content.html'
options = Options(
Argument('lang'),
)
def get_context(self, context, lang):
try:
# If there's an exception (500), default context_processors may not be called.
request = context['request']
except KeyError:
return {'template': 'cms/content.html'}
if hasattr(request, "_language_changer"):
try:
url = request._language_changer(lang)
except NoReverseMatch:
url = DefaultLanguageChanger(request)(lang)
else:
# use the default language changer
url = DefaultLanguageChanger(request)(lang)
return {'content': url}
register.tag(PageLanguageUrl)
| bsd-3-clause |
home-assistant/home-assistant | homeassistant/components/skybell/camera.py | 27 | 2818 | """Camera support for the Skybell HD Doorbell."""
from datetime import timedelta
import logging
import requests
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.const import CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from . import DOMAIN as SKYBELL_DOMAIN, SkybellDevice
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=90)
IMAGE_AVATAR = "avatar"
IMAGE_ACTIVITY = "activity"
CONF_ACTIVITY_NAME = "activity_name"
CONF_AVATAR_NAME = "avatar_name"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=[IMAGE_AVATAR]): vol.All(
cv.ensure_list, [vol.In([IMAGE_AVATAR, IMAGE_ACTIVITY])]
),
vol.Optional(CONF_ACTIVITY_NAME): cv.string,
vol.Optional(CONF_AVATAR_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the platform for a Skybell device."""
cond = config[CONF_MONITORED_CONDITIONS]
names = {}
names[IMAGE_ACTIVITY] = config.get(CONF_ACTIVITY_NAME)
names[IMAGE_AVATAR] = config.get(CONF_AVATAR_NAME)
skybell = hass.data.get(SKYBELL_DOMAIN)
sensors = []
for device in skybell.get_devices():
for camera_type in cond:
sensors.append(SkybellCamera(device, camera_type, names.get(camera_type)))
add_entities(sensors, True)
class SkybellCamera(SkybellDevice, Camera):
"""A camera implementation for Skybell devices."""
def __init__(self, device, camera_type, name=None):
"""Initialize a camera for a Skybell device."""
self._type = camera_type
SkybellDevice.__init__(self, device)
Camera.__init__(self)
if name is not None:
self._name = f"{self._device.name} {name}"
else:
self._name = self._device.name
self._url = None
self._response = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def image_url(self):
"""Get the camera image url based on type."""
if self._type == IMAGE_ACTIVITY:
return self._device.activity_image
return self._device.image
def camera_image(self):
"""Get the latest camera image."""
super().update()
if self._url != self.image_url:
self._url = self.image_url
try:
self._response = requests.get(self._url, stream=True, timeout=10)
except requests.HTTPError as err:
_LOGGER.warning("Failed to get camera image: %s", err)
self._response = None
if not self._response:
return None
return self._response.content
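# Example configuration.yaml entry this platform accepts (a sketch; the
# skybell component itself is configured separately):
#
#   camera:
#     - platform: skybell
#       monitored_conditions:
#         - avatar
#         - activity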
| apache-2.0 |
chdecultot/erpnext | erpnext/erpnext_integrations/utils.py | 5 | 1165 | import frappe
from frappe import _
import base64, hashlib, hmac
from six.moves.urllib.parse import urlparse
def validate_webhooks_request(doctype, hmac_key, secret_key='secret'):
def innerfn(fn):
settings = frappe.get_doc(doctype)
if frappe.request and settings and settings.get(secret_key) and not frappe.flags.in_test:
sig = base64.b64encode(
hmac.new(
settings.get(secret_key).encode('utf8'),
frappe.request.data,
hashlib.sha256
).digest()
)
if frappe.request.data and \
frappe.get_request_header(hmac_key) and \
not sig == bytes(frappe.get_request_header(hmac_key).encode()):
frappe.throw(_("Unverified Webhook Data"))
frappe.set_user(settings.modified_by)
return fn
return innerfn
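# Usage sketch (hypothetical connector endpoint; the header name follows the
# provider's webhook-signature convention and is an assumption here):
#
#   @frappe.whitelist(allow_guest=True)
#   @validate_webhooks_request("Shopify Settings", "X-Shopify-Hmac-Sha256",
#                              secret_key="shared_secret")
#   def order_webhook():
#       ...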
def get_webhook_address(connector_name, method, exclude_uri=False):
endpoint = "erpnext.erpnext_integrations.connectors.{0}.{1}".format(connector_name, method)
if exclude_uri:
return endpoint
try:
url = frappe.request.url
except RuntimeError:
url = "http://localhost:8000"
server_url = '{uri.scheme}://{uri.netloc}/api/method/{endpoint}'.format(uri=urlparse(url), endpoint=endpoint)
	return server_url
| gpl-3.0 |
jdahlin/stoq-wubi | src/bittorrent/Uploader.py | 12 | 8059 | # Written by Bram Cohen
# see LICENSE.txt for license information
from CurrentRateMeasure import Measure
class Upload:
def __init__(self, connection, choker, storage,
max_slice_length, max_rate_period, fudge):
self.connection = connection
self.choker = choker
self.storage = storage
self.max_slice_length = max_slice_length
self.max_rate_period = max_rate_period
self.choked = True
self.interested = False
self.buffer = []
self.measure = Measure(max_rate_period, fudge)
if storage.do_I_have_anything():
connection.send_bitfield(storage.get_have_list())
def got_not_interested(self):
if self.interested:
self.interested = False
del self.buffer[:]
self.choker.not_interested(self.connection)
def got_interested(self):
if not self.interested:
self.interested = True
self.choker.interested(self.connection)
def flushed(self):
while len(self.buffer) > 0 and self.connection.is_flushed():
index, begin, length = self.buffer[0]
del self.buffer[0]
piece = self.storage.get_piece(index, begin, length)
if piece is None:
self.connection.close()
return
self.measure.update_rate(len(piece))
self.connection.send_piece(index, begin, piece)
def got_request(self, index, begin, length):
if not self.interested or length > self.max_slice_length:
self.connection.close()
return
if not self.choked:
self.buffer.append((index, begin, length))
self.flushed()
def got_cancel(self, index, begin, length):
try:
self.buffer.remove((index, begin, length))
except ValueError:
pass
def choke(self):
if not self.choked:
self.choked = True
del self.buffer[:]
self.connection.send_choke()
def unchoke(self):
if self.choked:
self.choked = False
self.connection.send_unchoke()
def is_choked(self):
return self.choked
def is_interested(self):
return self.interested
def has_queries(self):
return len(self.buffer) > 0
def get_rate(self):
return self.measure.get_rate()
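# Usage sketch (the connection/choker plumbing lives elsewhere in this
# package; the dummy classes below show the expected interfaces):
#
#   up = Upload(connection, choker, storage,
#               max_slice_length=2 ** 17, max_rate_period=20, fudge=5)
#   up.got_interested()
#   up.unchoke()
#   up.got_request(index, begin, length)  # queued, sent once flushed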
class DummyConnection:
def __init__(self, events):
self.events = events
self.flushed = False
def send_bitfield(self, bitfield):
self.events.append(('bitfield', bitfield))
def is_flushed(self):
return self.flushed
def close(self):
self.events.append('closed')
def send_piece(self, index, begin, piece):
self.events.append(('piece', index, begin, piece))
def send_choke(self):
self.events.append('choke')
def send_unchoke(self):
self.events.append('unchoke')
class DummyChoker:
def __init__(self, events):
self.events = events
def interested(self, connection):
self.events.append('interested')
def not_interested(self, connection):
self.events.append('not interested')
class DummyStorage:
def __init__(self, events):
self.events = events
def do_I_have_anything(self):
self.events.append('do I have')
return True
def get_have_list(self):
self.events.append('get have list')
return [False, True]
def get_piece(self, index, begin, length):
self.events.append(('get piece', index, begin, length))
if length == 4:
return None
return 'a' * length
def test_skip_over_choke():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
assert u.is_choked()
assert not u.is_interested()
u.got_interested()
assert u.is_interested()
u.got_request(0, 0, 3)
dco.flushed = True
u.flushed()
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'interested']
def test_bad_piece():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
assert u.is_choked()
assert not u.is_interested()
u.got_interested()
assert u.is_interested()
u.unchoke()
assert not u.is_choked()
u.got_request(0, 0, 4)
dco.flushed = True
u.flushed()
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'interested', 'unchoke',
('get piece', 0, 0, 4), 'closed']
def test_still_rejected_after_unchoke():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
assert u.is_choked()
assert not u.is_interested()
u.got_interested()
assert u.is_interested()
u.unchoke()
assert not u.is_choked()
u.got_request(0, 0, 3)
u.choke()
u.unchoke()
dco.flushed = True
u.flushed()
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'interested', 'unchoke',
'choke', 'unchoke']
def test_sends_when_flushed():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
u.unchoke()
u.got_interested()
u.got_request(0, 1, 3)
dco.flushed = True
u.flushed()
u.flushed()
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'unchoke', 'interested',
('get piece', 0, 1, 3), ('piece', 0, 1, 'aaa')]
def test_sends_immediately():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
u.unchoke()
u.got_interested()
dco.flushed = True
u.got_request(0, 1, 3)
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'unchoke', 'interested',
('get piece', 0, 1, 3), ('piece', 0, 1, 'aaa')]
def test_cancel():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
u.unchoke()
u.got_interested()
u.got_request(0, 1, 3)
u.got_cancel(0, 1, 3)
u.got_cancel(0, 1, 2)
u.flushed()
dco.flushed = True
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'unchoke', 'interested']
def test_clears_on_not_interested():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
u.unchoke()
u.got_interested()
u.got_request(0, 1, 3)
u.got_not_interested()
dco.flushed = True
u.flushed()
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'unchoke', 'interested',
'not interested']
def test_close_when_sends_on_not_interested():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
u.got_request(0, 1, 3)
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'closed']
def test_close_over_max_length():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
u.got_interested()
u.got_request(0, 1, 101)
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'interested', 'closed']
def test_no_bitfield_on_start_empty():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
ds.do_I_have_anything = lambda: False
u = Upload(dco, dch, ds, 100, 20, 5)
assert events == []
| gpl-2.0 |
reinout/django | django/core/files/temp.py | 28 | 2501 | """
The temp module provides a NamedTemporaryFile that can be reopened in the same
process on any platform. Most platforms use the standard Python
tempfile.NamedTemporaryFile class, but Windows users are given a custom class.
This is needed because the Python implementation of NamedTemporaryFile uses the
O_TEMPORARY flag under Windows, which prevents the file from being reopened
if the same flag is not provided [1][2]. Note that this does not address the
more general issue of opening a file for writing and reading in multiple
processes in a manner that works across platforms.
The custom version of NamedTemporaryFile doesn't support the same keyword
arguments available in tempfile.NamedTemporaryFile.
1: https://mail.python.org/pipermail/python-list/2005-December/336957.html
2: http://bugs.python.org/issue14243
"""
import os
import tempfile
from django.core.files.utils import FileProxyMixin
__all__ = ('NamedTemporaryFile', 'gettempdir',)
if os.name == 'nt':
class TemporaryFile(FileProxyMixin):
"""
Temporary file object constructor that supports reopening of the
temporary file in Windows.
Unlike tempfile.NamedTemporaryFile from the standard library,
__init__() doesn't support the 'delete', 'buffering', 'encoding', or
'newline' keyword arguments.
"""
def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='', dir=None):
fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
self.name = name
self.file = os.fdopen(fd, mode, bufsize)
self.close_called = False
# Because close can be called during shutdown
# we need to cache os.unlink and access it
# as self.unlink only
unlink = os.unlink
def close(self):
if not self.close_called:
self.close_called = True
try:
self.file.close()
except (OSError, IOError):
pass
try:
self.unlink(self.name)
except OSError:
pass
def __del__(self):
self.close()
def __enter__(self):
self.file.__enter__()
return self
def __exit__(self, exc, value, tb):
self.file.__exit__(exc, value, tb)
NamedTemporaryFile = TemporaryFile
else:
NamedTemporaryFile = tempfile.NamedTemporaryFile
gettempdir = tempfile.gettempdir
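# Usage sketch (identical on all platforms, including Windows):
#
#   from django.core.files.temp import NamedTemporaryFile
#   with NamedTemporaryFile(suffix='.txt') as tmp:
#       tmp.write(b'data')
#       tmp.flush()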
| bsd-3-clause |
kennethlove/django | tests/regressiontests/inline_formsets/tests.py | 6 | 6162 | from __future__ import absolute_import, unicode_literals
from django.forms.models import inlineformset_factory
from django.test import TestCase
from .models import Poet, Poem, School, Parent, Child
class DeletionTests(TestCase):
def test_deletion(self):
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
poem = poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '1',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': str(poem.pk),
'poem_set-0-poet': str(poet.pk),
'poem_set-0-name': 'test',
'poem_set-0-DELETE': 'on',
}
formset = PoemFormSet(data, instance=poet)
formset.save()
self.assertTrue(formset.is_valid())
self.assertEqual(Poem.objects.count(), 0)
def test_add_form_deletion_when_invalid(self):
"""
Make sure that an add form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '0',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': '',
'poem_set-0-poem': '1',
'poem_set-0-name': 'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 0)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_change_form_deletion_when_invalid(self):
"""
Make sure that a change form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
poem = poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '1',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': unicode(poem.id),
'poem_set-0-poem': unicode(poem.id),
'poem_set-0-name': 'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 1)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_save_new(self):
"""
Make sure inlineformsets respect commit=False
regression for #10750
"""
# exclude some required field from the forms
ChildFormSet = inlineformset_factory(School, Child, exclude=['father', 'mother'])
school = School.objects.create(name='test')
mother = Parent.objects.create(name='mother')
father = Parent.objects.create(name='father')
data = {
'child_set-TOTAL_FORMS': '1',
'child_set-INITIAL_FORMS': '0',
'child_set-MAX_NUM_FORMS': '0',
'child_set-0-name': 'child',
}
formset = ChildFormSet(data, instance=school)
self.assertEqual(formset.is_valid(), True)
objects = formset.save(commit=False)
for obj in objects:
obj.mother = mother
obj.father = father
obj.save()
self.assertEqual(school.child_set.count(), 1)
class InlineFormsetFactoryTest(TestCase):
def test_inline_formset_factory(self):
"""
These should both work without a problem.
"""
inlineformset_factory(Parent, Child, fk_name='mother')
inlineformset_factory(Parent, Child, fk_name='father')
def test_exception_on_unspecified_foreign_key(self):
"""
Child has two ForeignKeys to Parent, so if we don't specify which one
to use for the inline formset, we should get an exception.
"""
self.assertRaisesRegexp(Exception,
"<class 'regressiontests.inline_formsets.models.Child'> has more than 1 ForeignKey to <class 'regressiontests.inline_formsets.models.Parent'>",
inlineformset_factory, Parent, Child
)
def test_fk_name_not_foreign_key_field_from_child(self):
"""
If we specify fk_name, but it isn't a ForeignKey from the child model
to the parent model, we should get an exception.
"""
        self.assertRaisesRegexp(Exception,
            "fk_name 'school' is not a ForeignKey to <class 'regressiontests.inline_formsets.models.Parent'>",
            inlineformset_factory, Parent, Child, fk_name='school'
        )
def test_non_foreign_key_field(self):
"""
If the field specified in fk_name is not a ForeignKey, we should get an
exception.
"""
self.assertRaisesRegexp(Exception,
"<class 'regressiontests.inline_formsets.models.Child'> has no field named 'test'",
inlineformset_factory, Parent, Child, fk_name='test'
)
def test_any_iterable_allowed_as_argument_to_exclude(self):
# Regression test for #9171.
inlineformset_factory(
Parent, Child, exclude=['school'], fk_name='mother'
)
inlineformset_factory(
Parent, Child, exclude=('school',), fk_name='mother'
)
| bsd-3-clause |
rouge8/pip | tests/functional/test_download.py | 1 | 21568 | import os.path
import shutil
import textwrap
import pytest
from pip._internal.cli.status_codes import ERROR
from tests.lib.path import Path
def fake_wheel(data, wheel_path):
shutil.copy(
data.packages.joinpath('simple.dist-0.1-py2.py3-none-any.whl'),
data.packages.joinpath(wheel_path),
)
@pytest.mark.network
def test_download_if_requested(script):
"""
It should download (in the scratch path) and not install if requested.
"""
result = script.pip(
'download', '-d', 'pip_downloads', 'INITools==0.1'
)
assert Path('scratch') / 'pip_downloads' / 'INITools-0.1.tar.gz' \
in result.files_created
assert script.site_packages / 'initools' not in result.files_created
@pytest.mark.network
def test_basic_download_setuptools(script):
"""
It should download (in the scratch path) and not install if requested.
"""
result = script.pip('download', 'setuptools')
setuptools_prefix = str(Path('scratch') / 'setuptools')
assert any(
path.startswith(setuptools_prefix) for path in result.files_created
)
def test_download_wheel(script, data):
"""
Test using "pip download" to download a *.whl archive.
"""
result = script.pip(
'download',
'--no-index',
'-f', data.packages,
'-d', '.', 'meta'
)
assert (
Path('scratch') / 'meta-1.0-py2.py3-none-any.whl'
in result.files_created
)
assert script.site_packages / 'piptestpackage' not in result.files_created
@pytest.mark.network
def test_single_download_from_requirements_file(script):
"""
It should support download (in the scratch path) from PyPI from a
requirements file
"""
script.scratch_path.joinpath("test-req.txt").write_text(textwrap.dedent("""
INITools==0.1
"""))
result = script.pip(
'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
)
assert Path('scratch') / 'INITools-0.1.tar.gz' in result.files_created
assert script.site_packages / 'initools' not in result.files_created
@pytest.mark.network
def test_basic_download_should_download_dependencies(script):
"""
It should download dependencies (in the scratch path)
"""
result = script.pip(
'download', 'Paste[openid]==1.7.5.1', '-d', '.'
)
assert Path('scratch') / 'Paste-1.7.5.1.tar.gz' in result.files_created
openid_tarball_prefix = str(Path('scratch') / 'python-openid-')
assert any(
path.startswith(openid_tarball_prefix) for path in result.files_created
)
assert script.site_packages / 'openid' not in result.files_created
def test_download_wheel_archive(script, data):
"""
It should download a wheel archive path
"""
wheel_filename = 'colander-0.9.9-py2.py3-none-any.whl'
wheel_path = '/'.join((data.find_links, wheel_filename))
result = script.pip(
'download', wheel_path,
'-d', '.', '--no-deps'
)
assert Path('scratch') / wheel_filename in result.files_created
def test_download_should_download_wheel_deps(script, data):
"""
It should download dependencies for wheels(in the scratch path)
"""
wheel_filename = 'colander-0.9.9-py2.py3-none-any.whl'
dep_filename = 'translationstring-1.1.tar.gz'
wheel_path = '/'.join((data.find_links, wheel_filename))
result = script.pip(
'download', wheel_path,
'-d', '.', '--find-links', data.find_links, '--no-index'
)
assert Path('scratch') / wheel_filename in result.files_created
assert Path('scratch') / dep_filename in result.files_created
@pytest.mark.network
def test_download_should_skip_existing_files(script):
"""
It should not download files already existing in the scratch dir
"""
script.scratch_path.joinpath("test-req.txt").write_text(textwrap.dedent("""
INITools==0.1
"""))
result = script.pip(
'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
)
assert Path('scratch') / 'INITools-0.1.tar.gz' in result.files_created
assert script.site_packages / 'initools' not in result.files_created
# adding second package to test-req.txt
script.scratch_path.joinpath("test-req.txt").write_text(textwrap.dedent("""
INITools==0.1
python-openid==2.2.5
"""))
# only the second package should be downloaded
result = script.pip(
'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
)
openid_tarball_prefix = str(Path('scratch') / 'python-openid-')
assert any(
path.startswith(openid_tarball_prefix) for path in result.files_created
)
assert Path('scratch') / 'INITools-0.1.tar.gz' not in result.files_created
assert script.site_packages / 'initools' not in result.files_created
assert script.site_packages / 'openid' not in result.files_created
@pytest.mark.network
def test_download_vcs_link(script):
"""
It should allow -d flag for vcs links, regression test for issue #798.
"""
result = script.pip(
'download', '-d', '.', 'git+git://github.com/pypa/pip-test-package.git'
)
assert (
Path('scratch') / 'pip-test-package-0.1.1.zip'
in result.files_created
)
assert script.site_packages / 'piptestpackage' not in result.files_created
def test_only_binary_set_then_download_specific_platform(script, data):
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--only-binary=:all:`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
def test_no_deps_set_then_download_specific_platform(script, data):
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--no-deps`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--no-deps',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
def test_download_specific_platform_fails(script, data):
"""
Confirm that specifying an interpreter/platform constraint
enforces that ``--no-deps`` or ``--only-binary=:all:`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
expect_error=True,
)
assert '--only-binary=:all:' in result.stderr
def test_no_binary_set_then_download_specific_platform_fails(script, data):
"""
Confirm that an interpreter/platform constraint is rejected when
``--only-binary=:all:`` is weakened by a ``--no-binary`` override.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--no-binary=fake',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
expect_error=True,
)
assert '--only-binary=:all:' in result.stderr
def test_download_specify_platform(script, data):
"""
Test using "pip download --platform" to download a .whl archive
supported for a specific platform
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
# Confirm that universal wheels are returned even for specific
# platforms.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'macosx_10_9_x86_64',
'fake'
)
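# No assertion needed here: the universal wheel satisfies any platform,
# so the command merely has to succeed.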
data.reset()
fake_wheel(data, 'fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl')
fake_wheel(data, 'fake-2.0-py2.py3-none-linux_x86_64.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'macosx_10_10_x86_64',
'fake'
)
assert (
Path('scratch') /
'fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl'
in result.files_created
)
# OSX platform wheels are not backward-compatible.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'macosx_10_8_x86_64',
'fake',
expect_error=True,
)
# No linux wheel provided for this version.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake==1',
expect_error=True,
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake==2'
)
assert (
Path('scratch') / 'fake-2.0-py2.py3-none-linux_x86_64.whl'
in result.files_created
)
class TestDownloadPlatformManylinuxes(object):
"""
"pip download --platform" downloads a .whl archive supported for
manylinux platforms.
"""
@pytest.mark.parametrize("platform", [
"linux_x86_64",
"manylinux1_x86_64",
"manylinux2010_x86_64",
])
def test_download_universal(self, platform, script, data):
"""
Universal wheels are returned even for specific platforms.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', platform,
'fake',
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
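# manylinux is forward-compatible: a manylinux1 wheel is also usable on a
# manylinux2010 target, but not the reverse.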
@pytest.mark.parametrize("wheel_abi,platform", [
("manylinux1_x86_64", "manylinux1_x86_64"),
("manylinux1_x86_64", "manylinux2010_x86_64"),
("manylinux2010_x86_64", "manylinux2010_x86_64"),
])
def test_download_compatible_manylinuxes(
self, wheel_abi, platform, script, data,
):
"""
Earlier manylinuxes are compatible with later manylinuxes.
"""
wheel = 'fake-1.0-py2.py3-none-{}.whl'.format(wheel_abi)
fake_wheel(data, wheel)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', platform,
'fake',
)
assert Path('scratch') / wheel in result.files_created
def test_explicit_platform_only(self, data, script):
"""
When specifying the platform, manylinux1 needs to be the
explicit platform; it is never added to the compatible tags
automatically.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-linux_x86_64.whl')
script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
)
def test_download__python_version(script, data):
"""
Test using "pip download --python-version" to download a .whl archive
supported for a specific interpreter
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '2',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '3',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '27',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '33',
'fake'
)
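# The '3', '27' and '33' requests above only have to succeed: the
# universal py2.py3 wheel matches every requested interpreter version.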
data.reset()
fake_wheel(data, 'fake-1.0-py2-none-any.whl')
fake_wheel(data, 'fake-2.0-py3-none-any.whl')
# No py3 provided for version 1.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '3',
'fake==1.0',
expect_error=True,
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '2',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '26',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '3',
'fake'
)
assert (
Path('scratch') / 'fake-2.0-py3-none-any.whl'
in result.files_created
)
def make_wheel_with_python_requires(script, package_name, python_requires):
"""
Create a wheel using the given python_requires.
:return: the path to the wheel file.
"""
package_dir = script.scratch_path / package_name
package_dir.mkdir()
text = textwrap.dedent("""\
from setuptools import setup
setup(name='{}',
python_requires='{}',
version='1.0')
""").format(package_name, python_requires)
package_dir.joinpath('setup.py').write_text(text)
script.run(
'python', 'setup.py', 'bdist_wheel', '--universal', cwd=package_dir,
)
file_name = '{}-1.0-py2.py3-none-any.whl'.format(package_name)
return package_dir / 'dist' / file_name
def test_download__python_version_used_for_python_requires(
script, data, with_wheel,
):
"""
Test that --python-version is used for the Requires-Python check.
"""
wheel_path = make_wheel_with_python_requires(
script, 'mypackage', python_requires='==3.2',
)
wheel_dir = os.path.dirname(wheel_path)
def make_args(python_version):
return [
'download', '--no-index', '--find-links', wheel_dir,
'--only-binary=:all:',
'--dest', '.',
'--python-version', python_version,
'mypackage==1.0',
]
args = make_args('33')
result = script.pip(*args, expect_error=True)
expected_err = (
"ERROR: Package 'mypackage' requires a different Python: "
"3.3.0 not in '==3.2'"
)
assert expected_err in result.stderr, 'stderr: {}'.format(result.stderr)
# Now try with a --python-version that satisfies the Requires-Python.
args = make_args('32')
script.pip(*args) # no exception
def test_download_specify_abi(script, data):
"""
Test using "pip download --abi" to download a .whl archive
supported for a specific abi
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--abi', 'fake_abi',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--abi', 'none',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--abi', 'cp27m',
'fake',
)
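# The 'none' and 'cp27m' ABI requests above succeed because an abi tag of
# 'none' is always treated as compatible.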
data.reset()
fake_wheel(data, 'fake-1.0-fk2-fakeabi-fake_platform.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '2',
'--implementation', 'fk',
'--platform', 'fake_platform',
'--abi', 'fakeabi',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-fk2-fakeabi-fake_platform.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--platform', 'fake_platform',
'--abi', 'none',
'fake',
expect_error=True,
)
def test_download_specify_implementation(script, data):
"""
Test using "pip download --abi" to download a .whl archive
supported for a specific abi
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
data.reset()
fake_wheel(data, 'fake-1.0-fk2.fk3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-fk2.fk3-none-any.whl'
in result.files_created
)
data.reset()
fake_wheel(data, 'fake-1.0-fk3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--python-version', '3',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-fk3-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--python-version', '2',
'fake',
expect_error=True,
)
def test_download_exit_status_code_when_no_requirements(script):
"""
Test download exit status code when no requirements specified
"""
result = script.pip('download', expect_error=True)
assert (
"You must give at least one requirement to download" in result.stderr
)
assert result.returncode == ERROR
def test_download_exit_status_code_when_blank_requirements_file(script):
"""
Test download exit status code when blank requirements file specified
"""
script.scratch_path.joinpath("blank.txt").write_text("\n")
script.pip('download', '-r', 'blank.txt')
def test_download_prefer_binary_when_tarball_higher_than_wheel(script, data):
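"""
With --prefer-binary, the older 0.8 wheel is preferred over the newer
1.0 sdist.
"""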
fake_wheel(data, 'source-0.8-py2.py3-none-any.whl')
result = script.pip(
'download',
'--prefer-binary',
'--no-index',
'-f', data.packages,
'-d', '.', 'source'
)
assert (
Path('scratch') / 'source-0.8-py2.py3-none-any.whl'
in result.files_created
)
assert (
Path('scratch') / 'source-1.0.tar.gz'
not in result.files_created
)
def test_download_prefer_binary_when_wheel_doesnt_satisfy_req(script, data):
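"""
--prefer-binary still honors the requirement: when no wheel satisfies
source>0.9, the 1.0 sdist is downloaded instead.
"""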
fake_wheel(data, 'source-0.8-py2.py3-none-any.whl')
script.scratch_path.joinpath("test-req.txt").write_text(textwrap.dedent("""
source>0.9
"""))
result = script.pip(
'download',
'--prefer-binary',
'--no-index',
'-f', data.packages,
'-d', '.',
'-r', script.scratch_path / 'test-req.txt'
)
assert (
Path('scratch') / 'source-1.0.tar.gz'
in result.files_created
)
assert (
Path('scratch') / 'source-0.8-py2.py3-none-any.whl'
not in result.files_created
)
def test_download_prefer_binary_when_only_tarball_exists(script, data):
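"""
--prefer-binary falls back to the sdist when no wheel exists at all.
"""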
result = script.pip(
'download',
'--prefer-binary',
'--no-index',
'-f', data.packages,
'-d', '.', 'source'
)
assert (
Path('scratch') / 'source-1.0.tar.gz'
in result.files_created
)
| mit |
TensorVision/MediSeg | eval.py | 1 | 3028 | """Evaluation of the Model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import tensorflow as tf
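# NOTE: this file uses the pre-1.0 TensorFlow API (e.g.
# tf.merge_all_summaries, tf.initialize_all_variables); on TF >= 1.0 these
# became tf.summary.merge_all and tf.global_variables_initializer.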
import os
import utils
import logging
import sys
import imp
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_data', 'test',
"""Either 'test' or 'train_eval'.""")
# TODO: Iterate over all possible values
# and write them to TensorBoard.
def evaluate(train_dir):
"""Loads the model and runs evaluation
"""
target_dir = os.path.join(train_dir, "model_files")
params = imp.load_source("params", os.path.join(target_dir, "params.py"))
data_input = imp.load_source("input", os.path.join(target_dir, "input.py"))
network = imp.load_source("network", os.path.join(target_dir, "network.py"))
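# These are the copies presumably frozen into train_dir/model_files when
# training started, so evaluation reuses the exact params, input pipeline
# and network definition that produced the checkpoint.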
with tf.Graph().as_default():
# Retrieve images and labels
eval_data = FLAGS.eval_data == 'test'
images, labels = data_input.inputs(eval_data=eval_data, data_dir=utils.cfg.data_dir,
batch_size=params.batch_size)
# Generate placeholders for the images and labels.
keep_prob = utils.placeholder_inputs(params.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = network.inference(images, keep_prob)
# Add to the Graph the Ops for loss calculation.
loss = network.loss(logits, labels)
# Calculate predictions.
top_k_op = tf.nn.in_top_k(logits, labels, 1)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = network.evaluation(logits, labels)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
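# Note: loss, top_k_op and summary_op are built here but never run below;
# only eval_correct is consumed by utils.do_eval.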
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
init = tf.initialize_all_variables()
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
ckpt = tf.train.get_checkpoint_state(train_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print("No checkpoints found! ")
exit(1)
print("Doing Evaluation with lots of data")
utils.do_eval(sess=sess,
eval_correct=eval_correct,
keep_prob=keep_prob,
num_examples=params.num_examples_per_epoch_for_eval,
params=params,
name="eval")
def main(_):
train_dir = utils.get_train_dir()
evaluate(train_dir)
if __name__ == '__main__':
tf.app.run()
| mit |