repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 distinct values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 distinct values) |
---|---|---|---|---|---|
aloeffler/linux308
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
5411
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError("You need to install the wxpython lib for this script")
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
|
gpl-2.0
|
mibanescu/pulp
|
server/test/unit/server/agent/test_connector.py
|
15
|
1197
|
from unittest import TestCase
from mock import patch
from pulp.server.agent.connector import get_url, add_connector
messaging = {
'url': 'atlantis',
'cacert': '/path/ca',
'clientcert': '/path/cert',
'transport': 'monkey'
}
conf = {
'messaging': messaging
}
class Config(object):
@staticmethod
def get(section, _property):
return conf[section][_property]
class TestConnector(TestCase):
@patch('pulp.server.agent.connector.config', Config)
def test_get_url(self):
url = messaging['url']
adapter = messaging['transport']
self.assertEqual('+'.join((adapter, url)), get_url())
@patch('pulp.server.agent.connector.Connector')
@patch('pulp.server.agent.connector.get_url')
@patch('pulp.server.agent.connector.config', Config)
def test_add_connector(self, _get_url, _connector):
add_connector()
_connector.assert_called_with(_get_url.return_value)
_connector.return_value.add.assert_called_with()
self.assertEqual(_connector.return_value.ssl.ca_certificate, messaging['cacert'])
self.assertEqual(_connector.return_value.ssl.client_certificate, messaging['clientcert'])
|
gpl-2.0
|
adaptivelogic/django-cms
|
cms/test_utils/project/placeholderapp/admin.py
|
5
|
2712
|
from cms.admin.placeholderadmin import PlaceholderAdmin
from cms.test_utils.project.placeholderapp.models import (Example1, Example2,
Example3, Example4, Example5)
from django.contrib import admin
class MixinAdmin(admin.ModelAdmin):
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
# silly test that placeholderadmin doesn't break things
request = kwargs.pop('request', None)
return super(MixinAdmin, self).formfield_for_dbfield(db_field, request=request, **kwargs)
class Example1Admin(PlaceholderAdmin, MixinAdmin):
pass
class Example2Admin(PlaceholderAdmin):
fieldsets = (
('Placeholder + more fields', {
'classes': ('wide',),
'fields': ('char_1', 'placeholder', 'char_2',)
}),
('Other fields', {
'classes': ('wide',),
'fields': ('char_3', 'char_4',)
}),
)
class Example3Admin(PlaceholderAdmin):
fieldsets = (
('Only chars', {
'classes': ('wide',),
'fields': ('char_1', 'char_2',)
}),
(u'Only Placeholder with right classes', {
'classes': ('plugin-holder', 'plugin-holder-nopage',),
'fields': ('placeholder',)
}),
('Only chars', {
'classes': ('wide',),
'fields': ('char_3', 'char_4',)
}),
)
class Example4Admin(PlaceholderAdmin):
fieldsets = (
('Only chars', {
'classes': ('wide',),
'fields': ('char_1', 'char_2',)
}),
(u'Only Placeholder, with wrong classes', {
'classes': ('wide', 'plugin-holder-nopage',),
'fields': ('placeholder',)
}),
('Only chars', {
'classes': ('wide',),
'fields': ('char_3', 'char_4',)
}),
)
class Example5Admin(PlaceholderAdmin):
fieldsets = (
('Only chars', {
'classes': ('wide',),
'fields': ('char_1', 'char_2',)
}),
(u'Two Placeholder, with right classes', {
'classes': ('plugin', 'plugin-holder-nopage',),
'fields': ('placeholder_1', 'placeholder_2',)
}),
('Only chars', {
'classes': ('wide',),
'fields': ('char_3', 'char_4',)
}),
)
admin.site.register(Example1, Example1Admin)
admin.site.register(Example2, Example2Admin)
admin.site.register(Example3, Example3Admin)
admin.site.register(Example4, Example4Admin)
admin.site.register(Example5, Example5Admin)
|
bsd-3-clause
|
idrogeno/FusionOE
|
lib/python/Components/Renderer/FrontpanelLed.py
|
51
|
1163
|
from Components.Element import Element
from os import path
# this is not a GUI renderer.
class FrontpanelLed(Element):
def __init__(self, which=0, patterns=None, boolean=True):
if not patterns: patterns = [(20, 0, 0xffffffff), (20, 0x55555555, 0x84fc8c04)]
self.which = which
self.boolean = boolean
self.patterns = patterns
Element.__init__(self)
def changed(self, *args, **kwargs):
if self.boolean:
val = self.source.boolean and 0 or 1
else:
val = self.source.value
(speed, pattern, pattern_4bit) = self.patterns[val]
if path.exists("/proc/stb/fp/led%d_pattern" % self.which):
f = open("/proc/stb/fp/led%d_pattern" % self.which, "w")
f.write("%08x" % pattern)
f.close()
if self.which == 0:
if path.exists("/proc/stb/fp/led_set_pattern"):
f = open("/proc/stb/fp/led_set_pattern", "w")
f.write("%08x" % pattern_4bit)
f.close()
if path.exists("/proc/stb/fp/led_set_speed"):
f = open("/proc/stb/fp/led_set_speed", "w")
f.write("%d" % speed)
f.close()
if path.exists("/proc/stb/fp/led_pattern_speed"):
f = open("/proc/stb/fp/led_pattern_speed", "w")
f.write("%d" % speed)
f.close()
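# Editorial note (not part of the original renderer): each entry of `patterns`
# is unpacked above as a (speed, pattern, pattern_4bit) tuple; when `boolean`
# is True the source's truth value selects the entry (True -> patterns[0],
# False -> patterns[1]), otherwise `source.value` indexes the list directly.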
|
gpl-2.0
|
uchuugaka/anaconda
|
anaconda_lib/ioloop.py
|
5
|
9224
|
# -*- coding: utf8 -*-
# Copyright (C) 2013 - Oscar Campos <[email protected]>
# This program is Free Software see LICENSE file for details
"""
Minimalist asynchronous network library just to fit Anaconda's needs and
replace the horrible asyncore/asynchat
Example of usage:
import ioloop
class TestClient(ioloop.EventHandler):
'''Client for test
'''
def __init__(self, host, port):
ioloop.EventHandler.__init__(self, (host, port))
self.message = []
def ready_to_write(self):
return True if self.outbuffer else False
def handle_read(self, data):
self.message.append(data)
def process_message(self):
print(b''.join(self.message))
self.message = []
"""
import os
import sys
import time
import errno
import socket
import select
import logging
import traceback
import threading
NOT_TERMINATE = True
class IOHandlers(object):
"""Class that register and unregister IOHandler
"""
_shared_state = {}
def __init__(self):
self.__dict__ = IOHandlers._shared_state
if hasattr(self, 'instanced') and self.instanced is True:
return
self._handler_pool = {}
self._lock = threading.Lock()
self.instanced = True
def ready_to_read(self):
"""Return back all the handlers that are ready to read
"""
return [h for h in self._handler_pool.values() if h.ready_to_read()]
def ready_to_write(self):
"""Return back all the handlers that are ready to write
"""
return [h for h in self._handler_pool.values() if h.ready_to_write()]
def register(self, handler):
"""Register a new handler
"""
logging.info(
'Registering handler with address {}'.format(handler.address))
with self._lock:
if handler.fileno() not in self._handler_pool:
self._handler_pool.update({handler.fileno(): handler})
def unregister(self, handler):
"""Unregister the given handler
"""
with self._lock:
if handler.fileno() in self._handler_pool:
self._handler_pool.pop(handler.fileno())
class EventHandler(object):
"""Event handler class
"""
def __init__(self, address, sock=None):
self._write_lock = threading.RLock()
self._read_lock = threading.RLock()
self.address = address
self.outbuffer = b''
self.inbuffer = b''
if sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(address)
self.connected = True
self.sock.setblocking(False)
IOHandlers().register(self)
def __del__(self):
if self in IOHandlers()._handler_pool.values():
IOHandlers().unregister(self)
def fileno(self):
"""Return the associated file descriptor
"""
return self.sock.fileno()
def send(self):
"""Send outgoing data
"""
with self._write_lock:
while len(self.outbuffer) > 0:
try:
sent = self.sock.send(self.outbuffer)
self.outbuffer = self.outbuffer[sent:]
except socket.error as error:
if error.args[0] == errno.EAGAIN:
time.sleep(0.1)
elif error.args[0] in (
errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN,
errno.ECONNABORTED, errno.EPIPE
):
self.close()
return 0
elif os.name == 'posix':
# Windows doesn't seem to have EBADFD
if sys.platform == 'darwin':
# OS X uses EBADF instead of EBADFD. Why? No idea; ask Tim
if error.args[0] == errno.EBADF:
self.close()
return 0
else:
if error.args[0] == errno.EBADFD:
self.close()
return 0
raise
else:
raise
def recv(self):
"""Receive some data
"""
try:
data = self.sock.recv(4096)
except socket.error as error:
if error.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return None
elif error.args[0] == errno.ECONNRESET:
self.close()
return None
else:
raise
if not data:
self.close()
return None
self.inbuffer += data
while self.inbuffer:
match = b'\r\n'
index = self.inbuffer.find(match)
if index != -1:
if index > 0:
self.handle_read(self.inbuffer[:index])
self.inbuffer = self.inbuffer[index+len(match):]
self.process_message()
else:
index = len(match) - 1
while index and not self.inbuffer.endswith(match[:index]):
index -= 1
if index:
if index != len(self.inbuffer):
self.handle_read(self.inbuffer[:-index])
self.inbuffer = self.inbuffer[-index:]
break
else:
self.handle_read(self.inbuffer)
self.inbuffer = b''
def push(self, data):
"""Push some bytes into the write buffer
"""
self.outbuffer += data
def handle_read(self, data):
"""Handle data readign from select
"""
raise RuntimeError('You have to implement this method')
def process_message(self):
"""Process the full message
"""
raise RuntimeError('You have to implement this method')
def ready_to_read(self):
"""This handler is ready to read
"""
return True
def ready_to_write(self):
"""This handler is ready to write
"""
return True
def close(self):
"""Close the socket and unregister the handler
"""
if self in IOHandlers()._handler_pool.values():
IOHandlers().unregister(self)
self.sock.close()
self.connected = False
def poll():
"""Poll the select
"""
recv = send = []
try:
if os.name != 'posix':
if IOHandlers()._handler_pool:
recv, send, _ = select.select(
IOHandlers().ready_to_read(),
IOHandlers().ready_to_write(),
[], 0
)
else:
recv, send, _ = select.select(
IOHandlers().ready_to_read(), IOHandlers().ready_to_write(),
[], 0
)
except select.error:
err = sys.exc_info()[1]
if err.args[0] == errno.EINTR:
return
raise
for handler in recv:
if handler is None or handler.ready_to_read() is not True:
continue
handler.recv()
for handler in send:
if handler is None or handler.ready_to_write() is not True:
continue
handler.send()
def loop():
"""Main event loop
"""
def restart_poll(error):
logging.error(
'Unhandled exception in poll, restarting the poll request')
logging.error(error)
for traceback_line in traceback.format_exc().splitlines():
logging.error(traceback_line)
with IOHandlers()._lock:
for handler in IOHandlers()._handler_pool.values():
handler.close()
IOHandlers()._handler_pool = {}
def inner_loop():
while NOT_TERMINATE:
try:
poll()
time.sleep(0.01)
except OSError as error:
if os.name != 'posix' and error.errno == os.errno.WSAENOTSOCK:
msg = (
'Unfortunately, the Windows socket is in an inconsistent'
' state; restart your Sublime Text 3. If the problem '
'persists, file an issue report at:'
' https://github.com/DamnWidget/anaconda/issues'
)
logging.error(msg)
import sublime
sublime.error_message(msg)
terminate()
else:
restart_poll(error)
except Exception as error:
restart_poll(error)
# cleanup
for handler in IOHandlers()._handler_pool.values():
handler.close()
threading.Thread(target=inner_loop).start()
def terminate():
"""Terminate the loop
"""
global NOT_TERMINATE
NOT_TERMINATE = False
def restart():
"""Restart the loop
"""
global NOT_TERMINATE
if NOT_TERMINATE is True:
NOT_TERMINATE = False
terminate()
NOT_TERMINATE = True
loop()
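# --- Illustrative usage sketch (editorial addition, not part of the original
# module). It mirrors the TestClient from the module docstring and assumes a
# line-oriented (CRLF-terminated) server listening on localhost:9999; the
# host and port are invented for the example.
class _ExampleClient(EventHandler):
    """Collect CRLF-terminated replies and print them"""

    def __init__(self, host, port):
        EventHandler.__init__(self, (host, port))
        self.message = []

    def handle_read(self, data):
        self.message.append(data)

    def process_message(self):
        print(b''.join(self.message))
        self.message = []


if __name__ == '__main__':
    client = _ExampleClient('localhost', 9999)  # hypothetical endpoint
    client.push(b'ping\r\n')                    # queued; flushed by the loop
    loop()                                      # poll() runs in a thread
    time.sleep(1)
    terminate()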
|
gpl-3.0
|
guptaankita/python-novaclient
|
novaclient/v2/contrib/host_evacuate_live.py
|
2
|
2475
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.i18n import _
from novaclient.openstack.common import cliutils
from novaclient import utils
def _server_live_migrate(cs, server, args):
class HostEvacuateLiveResponse(object):
def __init__(self, server_uuid, live_migration_accepted,
error_message):
self.server_uuid = server_uuid
self.live_migration_accepted = live_migration_accepted
self.error_message = error_message
success = True
error_message = ""
try:
cs.servers.live_migrate(server['uuid'], args.target_host,
args.block_migrate, args.disk_over_commit)
except Exception as e:
success = False
error_message = _("Error while live migrating instance: %s") % e
return HostEvacuateLiveResponse(server['uuid'],
success,
error_message)
@cliutils.arg('host', metavar='<host>', help='Name of host.')
@cliutils.arg(
'--target-host',
metavar='<target_host>',
default=None,
help=_('Name of target host.'))
@cliutils.arg(
'--block-migrate',
action='store_true',
default=False,
help=_('Enable block migration.'))
@cliutils.arg(
'--disk-over-commit',
action='store_true',
default=False,
help=_('Enable disk overcommit.'))
def do_host_evacuate_live(cs, args):
"""Live migrate all instances of the specified host
to other available hosts.
"""
hypervisors = cs.hypervisors.search(args.host, servers=True)
response = []
for hyper in hypervisors:
for server in getattr(hyper, 'servers', []):
response.append(_server_live_migrate(cs, server, args))
utils.print_list(response, ["Server UUID", "Live Migration Accepted",
"Error Message"])
|
apache-2.0
|
Bysmyyr/chromium-crosswalk
|
tools/telemetry/third_party/gsutilz/third_party/boto/setup.py
|
93
|
4874
|
#!/usr/bin/env python
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import print_function
try:
from setuptools import setup
extra = dict(test_suite="tests.test.suite", include_package_data=True)
except ImportError:
from distutils.core import setup
extra = {}
import sys
from boto import __version__
if sys.version_info <= (2, 5):
error = "ERROR: boto requires Python Version 2.6 or above...exiting."
print(error, file=sys.stderr)
sys.exit(1)
def readme():
with open("README.rst") as f:
return f.read()
setup(name = "boto",
version = __version__,
description = "Amazon Web Services Library",
long_description = readme(),
author = "Mitch Garnaat",
author_email = "[email protected]",
scripts = ["bin/sdbadmin", "bin/elbadmin", "bin/cfadmin",
"bin/s3put", "bin/fetch_file", "bin/launch_instance",
"bin/list_instances", "bin/taskadmin", "bin/kill_instance",
"bin/bundle_image", "bin/pyami_sendmail", "bin/lss3",
"bin/cq", "bin/route53", "bin/cwutil", "bin/instance_events",
"bin/asadmin", "bin/glacier", "bin/mturk",
"bin/dynamodb_dump", "bin/dynamodb_load"],
url = "https://github.com/boto/boto/",
packages = ["boto", "boto.sqs", "boto.s3", "boto.gs", "boto.file",
"boto.ec2", "boto.ec2.cloudwatch", "boto.ec2.autoscale",
"boto.ec2.elb", "boto.sdb", "boto.cacerts",
"boto.sdb.db", "boto.sdb.db.manager",
"boto.mturk", "boto.pyami",
"boto.pyami.installers", "boto.pyami.installers.ubuntu",
"boto.mashups", "boto.contrib", "boto.manage",
"boto.services", "boto.cloudfront",
"boto.roboto", "boto.rds", "boto.vpc", "boto.fps",
"boto.fps", "boto.emr", "boto.emr", "boto.sns",
"boto.ecs", "boto.iam", "boto.route53", "boto.ses",
"boto.cloudformation", "boto.sts", "boto.dynamodb",
"boto.swf", "boto.mws", "boto.cloudsearch", "boto.glacier",
"boto.beanstalk", "boto.datapipeline", "boto.elasticache",
"boto.elastictranscoder", "boto.opsworks", "boto.redshift",
"boto.dynamodb2", "boto.support", "boto.cloudtrail",
"boto.directconnect", "boto.kinesis", "boto.rds2",
"boto.cloudsearch2", "boto.logs", "boto.vendored",
"boto.route53.domains", "boto.cognito",
"boto.cognito.identity", "boto.cognito.sync",
"boto.cloudsearchdomain", "boto.kms",
"boto.awslambda", "boto.codedeploy", "boto.configservice",
"boto.cloudhsm", "boto.ec2containerservice",
"boto.machinelearning"],
package_data = {
"boto.cacerts": ["cacerts.txt"],
"boto": ["endpoints.json"],
},
license = "MIT",
platforms = "Posix; MacOS X; Windows",
classifiers = ["Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Internet",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4"],
**extra
)
|
bsd-3-clause
|
antb/TPT----My-old-mod
|
src/python/stdlib/io.py
|
191
|
3624
|
"""The io module provides the Python interfaces to stream handling. The
builtin open function is defined in this module.
At the top of the I/O hierarchy is the abstract base class IOBase. It
defines the basic interface to a stream. Note, however, that there is no
separation between reading and writing to streams; implementations are
allowed to throw an IOError if they do not support a given operation.
Extending IOBase is RawIOBase which deals simply with the reading and
writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide
an interface to OS files.
BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its
subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer
streams that are readable, writable, and both respectively.
BufferedRandom provides a buffered interface to random access
streams. BytesIO is a simple stream of in-memory bytes.
Another IOBase subclass, TextIOBase, deals with the encoding and decoding
of streams into text. TextIOWrapper, which extends it, is a buffered text
interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO
is an in-memory stream for text.
Argument names are not part of the specification, and only the arguments
of open() are intended to be used as keyword arguments.
data:
DEFAULT_BUFFER_SIZE
An int containing the default buffer size used by the module's buffered
I/O classes. open() uses the file's blksize (as obtained by os.stat) if
possible.
"""
# New I/O library conforming to PEP 3116.
# XXX edge cases when switching between reading/writing
# XXX need to support 1 meaning line-buffered
# XXX whenever an argument is None, use the default value
# XXX read/write ops should check readable/writable
# XXX buffered readinto should work with arbitrary buffer objects
# XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG
# XXX check writable, readable and seekable in appropriate places
__author__ = ("Guido van Rossum <[email protected]>, "
"Mike Verdone <[email protected]>, "
"Mark Russell <[email protected]>, "
"Antoine Pitrou <[email protected]>, "
"Amaury Forgeot d'Arc <[email protected]>, "
"Benjamin Peterson <[email protected]>")
__all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO",
"BytesIO", "StringIO", "BufferedIOBase",
"BufferedReader", "BufferedWriter", "BufferedRWPair",
"BufferedRandom", "TextIOBase", "TextIOWrapper",
"UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END"]
import _io
import abc
from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation,
open, FileIO, BytesIO, StringIO, BufferedReader,
BufferedWriter, BufferedRWPair, BufferedRandom,
IncrementalNewlineDecoder, TextIOWrapper)
OpenWrapper = _io.open # for compatibility with _pyio
# for seek()
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Declaring ABCs in C is tricky so we do it here.
# Method descriptions and default implementations are inherited from the C
# version however.
class IOBase(_io._IOBase):
__metaclass__ = abc.ABCMeta
class RawIOBase(_io._RawIOBase, IOBase):
pass
class BufferedIOBase(_io._BufferedIOBase, IOBase):
pass
class TextIOBase(_io._TextIOBase, IOBase):
pass
RawIOBase.register(FileIO)
for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom,
BufferedRWPair):
BufferedIOBase.register(klass)
for klass in (StringIO, TextIOWrapper):
TextIOBase.register(klass)
del klass
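if __name__ == "__main__":
    # Editorial sketch (not part of the stdlib module): the register() calls
    # above let the concrete _io types pass isinstance/issubclass checks
    # against the Python-level ABCs declared here.
    assert isinstance(BytesIO(b"abc"), BufferedIOBase)
    assert isinstance(StringIO(u"abc"), TextIOBase)
    assert issubclass(FileIO, RawIOBase)
    print("ABC registrations look consistent")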
|
gpl-2.0
|
FranMachio/Plugin.Video.Fran.machio
|
servers/filebox.py
|
35
|
3840
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for filebox
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
logger.info("[streamcloud.py] test_video_exists(page_url='%s')" % page_url)
data = scrapertools.cache_page( url = page_url )
if "<b>File Not Found</b>" in data:
return False,"El archivo no existe<br/>en filebox o ha sido borrado."
else:
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[filebox.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
'''
<input type="hidden" name="op" value="download2">
<input type="hidden" name="id" value="235812b1j9w1">
<input type="hidden" name="rand" value="na73zeeooqyfkndsv4uxzzpbajwi6mhbmixtogi">
<input type="hidden" name="referer" value="http://www.seriesyonkis.com/s/ngo/2/5/1/8/773">
'''
logger.info("[filebox.py] URL ")
data = scrapertools.cache_page(page_url)
# Wait the 5 seconds
try:
from platformcode.xbmc import xbmctools
xbmctools.handle_wait(5,"filebox",'')
except:
import time
time.sleep(5)
codigo = scrapertools.get_match(data,'<input type="hidden" name="id" value="([^"]+)">[^<]+')
rand = scrapertools.get_match(data,'<input type="hidden" name="rand" value="([^"]+)">')
#op=download2&id=xuquejiv6xdf&rand=r6dq7hn7so2ygpnxv2zg2i3cu3sbdsunf57gtni&referer=&method_free=&method_premium=&down_direct=1
post = "op=download2&id="+codigo+"&rand="+rand+"&referer=&method_free=&method_premium=&down_direct=1"
data = scrapertools.cache_page( page_url , post=post, headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'],['Referer',page_url]] )
logger.info("data="+data)
media_url = scrapertools.get_match(data,"this.play\('([^']+)'")
video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [filebox]",media_url])
for video_url in video_urls:
logger.info("[filebox.py] %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Find this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://www.filebox.com/embed-wa5p8wzh7tlq-700x385.html
patronvideos = 'filebox.com/embed-([0-9a-zA-Z]+)'
logger.info("[filebox.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[filebox]"
url = "http://www.filebox.com/"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'filebox' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
# http://www.filebox.com/729x1eo9zrx1
patronvideos = 'filebox.com/([0-9a-zA-Z]+)'
logger.info("[filebox.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[filebox]"
url = "http://www.filebox.com/"+match
if url!="http://www.filebox.com/embed" and url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'filebox' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
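# Editorial sketch (not part of the original connector): find_videos() pulls
# filebox ids out of arbitrary page text; the sample reuses the embed URL
# quoted in the comment inside find_videos().
def _example_find_videos():
    sample = '<iframe src="http://www.filebox.com/embed-wa5p8wzh7tlq-700x385.html"></iframe>'
    # expected: [['[filebox]', 'http://www.filebox.com/wa5p8wzh7tlq', 'filebox']]
    return find_videos(sample)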
def test():
video_urls = get_video_url("http://www.filebox.com/sstr2hlxt398")
return len(video_urls)>0
|
gpl-2.0
|
shishaochen/TensorFlow-0.8-Win
|
tensorflow/contrib/distributions/python/kernel_tests/gaussian_test.py
|
3
|
4995
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
class GaussianTest(tf.test.TestCase):
def testGaussianLogPDF(self):
with tf.Session():
batch_size = 6
mu = tf.constant([3.0] * batch_size)
sigma = tf.constant([math.sqrt(10.0)] * batch_size)
mu_v = 3.0
sigma_v = np.sqrt(10.0)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
expected_log_pdf = np.log(
1 / np.sqrt(2 * np.pi) / sigma_v
* np.exp(-1.0 / (2 * sigma_v**2) * (x - mu_v)**2))
log_pdf = gaussian.log_pdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval())
pdf = gaussian.pdf(x)
self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
def testGaussianLogPDFMultidimensional(self):
with tf.Session():
batch_size = 6
mu = tf.constant([[3.0, -3.0]] * batch_size)
sigma = tf.constant([[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
mu_v = np.array([3.0, -3.0])
sigma_v = np.array([np.sqrt(10.0), np.sqrt(15.0)])
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
expected_log_pdf = np.log(
1 / np.sqrt(2 * np.pi) / sigma_v
* np.exp(-1.0 / (2 * sigma_v**2) * (x - mu_v)**2))
log_pdf = gaussian.log_pdf(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllClose(expected_log_pdf, log_pdf_values)
pdf = gaussian.pdf(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testGaussianCDF(self):
with tf.Session():
batch_size = 6
mu = tf.constant([3.0] * batch_size)
sigma = tf.constant([math.sqrt(10.0)] * batch_size)
mu_v = 3.0
sigma_v = np.sqrt(10.0)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
erf_fn = np.vectorize(math.erf)
# From Wikipedia
expected_cdf = 0.5 * (1.0 + erf_fn((x - mu_v)/(sigma_v*np.sqrt(2))))
cdf = gaussian.cdf(x)
self.assertAllClose(expected_cdf, cdf.eval())
def testGaussianEntropy(self):
with tf.Session():
mu_v = np.array([1.0, 1.0, 1.0])
sigma_v = np.array([[1.0, 2.0, 3.0]]).T
gaussian = tf.contrib.distributions.Gaussian(mu=mu_v, sigma=sigma_v)
sigma_broadcast = mu_v * sigma_v
expected_entropy = 0.5 * np.log(2*np.pi*np.exp(1)*sigma_broadcast**2)
self.assertAllClose(expected_entropy, gaussian.entropy().eval())
def testGaussianSample(self):
with tf.Session():
mu = tf.constant(3.0)
sigma = tf.constant(math.sqrt(10.0))
mu_v = 3.0
sigma_v = np.sqrt(10.0)
n = tf.constant(100000)
gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
samples = gaussian.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000,))
self.assertAllClose(sample_values.mean(), mu_v, atol=1e-2)
self.assertAllClose(sample_values.std(), sigma_v, atol=1e-1)
def testGaussianSampleMultiDimensional(self):
with tf.Session():
batch_size = 2
mu = tf.constant([[3.0, -3.0]] * batch_size)
sigma = tf.constant([[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
mu_v = [3.0, -3.0]
sigma_v = [np.sqrt(10.0), np.sqrt(15.0)]
n = tf.constant(100000)
gaussian = tf.contrib.distributions.Gaussian(mu=mu, sigma=sigma)
samples = gaussian.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (100000, batch_size, 2))
self.assertAllClose(sample_values[:, 0, 0].mean(), mu_v[0], atol=1e-2)
self.assertAllClose(sample_values[:, 0, 0].std(), sigma_v[0], atol=1e-1)
self.assertAllClose(sample_values[:, 0, 1].mean(), mu_v[1], atol=1e-2)
self.assertAllClose(sample_values[:, 0, 1].std(), sigma_v[1], atol=1e-1)
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
|
rrrene/django
|
tests/gis_tests/test_geoforms.py
|
292
|
14830
|
from unittest import skipUnless
from django.contrib.gis import forms
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import GEOSGeometry
from django.forms import ValidationError
from django.test import SimpleTestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.html import escape
@skipUnless(HAS_GDAL, "GeometryFieldTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class GeometryFieldTest(SimpleTestCase):
def test_init(self):
"Testing GeometryField initialization with defaults."
fld = forms.GeometryField()
for bad_default in ('blah', 3, 'FoO', None, 0):
self.assertRaises(ValidationError, fld.clean, bad_default)
def test_srid(self):
"Testing GeometryField with a SRID set."
# Input that doesn't specify the SRID is assumed to be in the SRID
# of the input field.
fld = forms.GeometryField(srid=4326)
geom = fld.clean('POINT(5 23)')
self.assertEqual(4326, geom.srid)
# Making the field in a different SRID from that of the geometry, and
# asserting it transforms.
fld = forms.GeometryField(srid=32140)
tol = 0.0000001
xform_geom = GEOSGeometry('POINT (951640.547328465 4219369.26171664)', srid=32140)
# The cleaned geometry should be transformed to 32140.
cleaned_geom = fld.clean('SRID=4326;POINT (-95.363151 29.763374)')
self.assertTrue(xform_geom.equals_exact(cleaned_geom, tol))
def test_null(self):
"Testing GeometryField's handling of null (None) geometries."
# Form fields, by default, are required (`required=True`)
fld = forms.GeometryField()
with six.assertRaisesRegex(self, forms.ValidationError,
"No geometry value provided."):
fld.clean(None)
# This will clean None as a geometry (See #10660).
fld = forms.GeometryField(required=False)
self.assertIsNone(fld.clean(None))
def test_geom_type(self):
"Testing GeometryField's handling of different geometry types."
# By default, all geometry types are allowed.
fld = forms.GeometryField()
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
self.assertEqual(GEOSGeometry(wkt), fld.clean(wkt))
pnt_fld = forms.GeometryField(geom_type='POINT')
self.assertEqual(GEOSGeometry('POINT(5 23)'), pnt_fld.clean('POINT(5 23)'))
# a WKT for any other geom_type will be properly transformed by `to_python`
self.assertEqual(GEOSGeometry('LINESTRING(0 0, 1 1)'), pnt_fld.to_python('LINESTRING(0 0, 1 1)'))
# but rejected by `clean`
self.assertRaises(forms.ValidationError, pnt_fld.clean, 'LINESTRING(0 0, 1 1)')
def test_to_python(self):
"""
Testing to_python returns a correct GEOSGeometry object or
a ValidationError
"""
fld = forms.GeometryField()
# to_python returns the same GEOSGeometry for a WKT
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
self.assertEqual(GEOSGeometry(wkt), fld.to_python(wkt))
# but raises a ValidationError for any other string
for wkt in ('POINT(5)', 'MULTI POLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'BLAH(0 0, 1 1)'):
self.assertRaises(forms.ValidationError, fld.to_python, wkt)
def test_field_with_text_widget(self):
class PointForm(forms.Form):
pt = forms.PointField(srid=4326, widget=forms.TextInput)
form = PointForm()
cleaned_pt = form.fields['pt'].clean('POINT(5 23)')
self.assertEqual(cleaned_pt, GEOSGeometry('POINT(5 23)'))
self.assertEqual(4326, cleaned_pt.srid)
point = GEOSGeometry('SRID=4326;POINT(5 23)')
form = PointForm(data={'pt': 'POINT(5 23)'}, initial={'pt': point})
self.assertFalse(form.has_changed())
@skipUnless(HAS_GDAL, "SpecializedFieldTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class SpecializedFieldTest(SimpleTestCase):
def setUp(self):
self.geometries = {
'point': GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)"),
'multipoint': GEOSGeometry("SRID=4326;MULTIPOINT("
"(13.18634033203125 14.504356384277344),"
"(13.207969665527 14.490966796875),"
"(13.177070617675 14.454917907714))"),
'linestring': GEOSGeometry("SRID=4326;LINESTRING("
"-8.26171875 -0.52734375,"
"-7.734375 4.21875,"
"6.85546875 3.779296875,"
"5.44921875 -3.515625)"),
'multilinestring': GEOSGeometry("SRID=4326;MULTILINESTRING("
"(-16.435546875 -2.98828125,"
"-17.2265625 2.98828125,"
"-0.703125 3.515625,"
"-1.494140625 -3.33984375),"
"(-8.0859375 -5.9765625,"
"8.525390625 -8.7890625,"
"12.392578125 -0.87890625,"
"10.01953125 7.646484375))"),
'polygon': GEOSGeometry("SRID=4326;POLYGON("
"(-1.669921875 6.240234375,"
"-3.8671875 -0.615234375,"
"5.9765625 -3.955078125,"
"18.193359375 3.955078125,"
"9.84375 9.4921875,"
"-1.669921875 6.240234375))"),
'multipolygon': GEOSGeometry("SRID=4326;MULTIPOLYGON("
"((-17.578125 13.095703125,"
"-17.2265625 10.8984375,"
"-13.974609375 10.1953125,"
"-13.359375 12.744140625,"
"-15.732421875 13.7109375,"
"-17.578125 13.095703125)),"
"((-8.525390625 5.537109375,"
"-8.876953125 2.548828125,"
"-5.888671875 1.93359375,"
"-5.09765625 4.21875,"
"-6.064453125 6.240234375,"
"-8.525390625 5.537109375)))"),
'geometrycollection': GEOSGeometry("SRID=4326;GEOMETRYCOLLECTION("
"POINT(5.625 -0.263671875),"
"POINT(6.767578125 -3.603515625),"
"POINT(8.525390625 0.087890625),"
"POINT(8.0859375 -2.13134765625),"
"LINESTRING("
"6.273193359375 -1.175537109375,"
"5.77880859375 -1.812744140625,"
"7.27294921875 -2.230224609375,"
"7.657470703125 -1.25244140625))"),
}
def assertMapWidget(self, form_instance):
"""
Make sure the MapWidget js is passed in the form media and a MapWidget
is actually created
"""
self.assertTrue(form_instance.is_valid())
rendered = form_instance.as_p()
self.assertIn('new MapWidget(options);', rendered)
self.assertIn('gis/js/OLMapWidget.js', str(form_instance.media))
def assertTextarea(self, geom, rendered):
"""Makes sure the wkt and a textarea are in the content"""
self.assertIn('<textarea ', rendered)
self.assertIn('required', rendered)
self.assertIn(geom.wkt, rendered)
def test_pointfield(self):
class PointForm(forms.Form):
p = forms.PointField()
geom = self.geometries['point']
form = PointForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PointForm().is_valid())
invalid = PointForm(data={'p': 'some invalid geom'})
self.assertFalse(invalid.is_valid())
self.assertIn('Invalid geometry value', str(invalid.errors))
for invalid in [geo for key, geo in self.geometries.items() if key != 'point']:
self.assertFalse(PointForm(data={'p': invalid.wkt}).is_valid())
def test_multipointfield(self):
class PointForm(forms.Form):
p = forms.MultiPointField()
geom = self.geometries['multipoint']
form = PointForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PointForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'multipoint']:
self.assertFalse(PointForm(data={'p': invalid.wkt}).is_valid())
def test_linestringfield(self):
class LineStringForm(forms.Form):
l = forms.LineStringField()
geom = self.geometries['linestring']
form = LineStringForm(data={'l': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(LineStringForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'linestring']:
self.assertFalse(LineStringForm(data={'p': invalid.wkt}).is_valid())
def test_multilinestringfield(self):
class LineStringForm(forms.Form):
l = forms.MultiLineStringField()
geom = self.geometries['multilinestring']
form = LineStringForm(data={'l': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(LineStringForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'multilinestring']:
self.assertFalse(LineStringForm(data={'p': invalid.wkt}).is_valid())
def test_polygonfield(self):
class PolygonForm(forms.Form):
p = forms.PolygonField()
geom = self.geometries['polygon']
form = PolygonForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PolygonForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'polygon']:
self.assertFalse(PolygonForm(data={'p': invalid.wkt}).is_valid())
def test_multipolygonfield(self):
class PolygonForm(forms.Form):
p = forms.MultiPolygonField()
geom = self.geometries['multipolygon']
form = PolygonForm(data={'p': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(PolygonForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'multipolygon']:
self.assertFalse(PolygonForm(data={'p': invalid.wkt}).is_valid())
def test_geometrycollectionfield(self):
class GeometryForm(forms.Form):
g = forms.GeometryCollectionField()
geom = self.geometries['geometrycollection']
form = GeometryForm(data={'g': geom})
self.assertTextarea(geom, form.as_p())
self.assertMapWidget(form)
self.assertFalse(GeometryForm().is_valid())
for invalid in [geo for key, geo in self.geometries.items() if key != 'geometrycollection']:
self.assertFalse(GeometryForm(data={'g': invalid.wkt}).is_valid())
@skipUnless(HAS_GDAL, "OSMWidgetTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class OSMWidgetTest(SimpleTestCase):
def setUp(self):
self.geometries = {
'point': GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)"),
}
def test_osm_widget(self):
class PointForm(forms.Form):
p = forms.PointField(widget=forms.OSMWidget)
geom = self.geometries['point']
form = PointForm(data={'p': geom})
rendered = form.as_p()
self.assertIn("OpenStreetMap (Mapnik)", rendered)
self.assertIn("id: 'id_p',", rendered)
def test_default_lat_lon(self):
class PointForm(forms.Form):
p = forms.PointField(
widget=forms.OSMWidget(attrs={
'default_lon': 20, 'default_lat': 30
}),
)
form = PointForm()
rendered = form.as_p()
self.assertIn("options['default_lon'] = 20;", rendered)
self.assertIn("options['default_lat'] = 30;", rendered)
if forms.OSMWidget.default_lon != 20:
self.assertNotIn(
"options['default_lon'] = %d;" % forms.OSMWidget.default_lon,
rendered)
if forms.OSMWidget.default_lat != 30:
self.assertNotIn(
"options['default_lat'] = %d;" % forms.OSMWidget.default_lat,
rendered)
@skipUnless(HAS_GDAL, "CustomGeometryWidgetTest needs GDAL support")
@skipUnlessDBFeature("gis_enabled")
class CustomGeometryWidgetTest(SimpleTestCase):
def test_custom_serialization_widget(self):
class CustomGeometryWidget(forms.BaseGeometryWidget):
template_name = 'gis/openlayers.html'
deserialize_called = 0
def serialize(self, value):
return value.json if value else ''
def deserialize(self, value):
self.deserialize_called += 1
return GEOSGeometry(value)
class PointForm(forms.Form):
p = forms.PointField(widget=CustomGeometryWidget)
point = GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)")
form = PointForm(data={'p': point})
self.assertIn(escape(point.json), form.as_p())
CustomGeometryWidget.called = 0
widget = form.fields['p'].widget
# Force deserialize use due to a string value
self.assertIn(escape(point.json), widget.render('p', point.json))
self.assertEqual(widget.deserialize_called, 1)
form = PointForm(data={'p': point.json})
self.assertTrue(form.is_valid())
# Ensure that resulting geometry has srid set
self.assertEqual(form.cleaned_data['p'].srid, 4326)
|
bsd-3-clause
|
hugovk/pylast
|
tests/test_librefm.py
|
1
|
1213
|
#!/usr/bin/env python
"""
Integration (not unit) tests for pylast.py
"""
import unittest
from flaky import flaky
import pylast
from .test_pylast import PyLastTestCase, load_secrets
@flaky(max_runs=3, min_passes=1)
class TestPyLastWithLibreFm(PyLastTestCase):
"""Own class for Libre.fm because we don't need the Last.fm setUp"""
def test_libre_fm(self):
# Arrange
secrets = load_secrets()
username = secrets["username"]
password_hash = secrets["password_hash"]
# Act
network = pylast.LibreFMNetwork(password_hash=password_hash, username=username)
artist = network.get_artist("Radiohead")
name = artist.get_name()
# Assert
self.assertEqual(name, "Radiohead")
def test_repr(self):
# Arrange
secrets = load_secrets()
username = secrets["username"]
password_hash = secrets["password_hash"]
network = pylast.LibreFMNetwork(password_hash=password_hash, username=username)
# Act
representation = repr(network)
# Assert
self.assert_startswith(representation, "pylast.LibreFMNetwork(")
if __name__ == "__main__":
unittest.main(failfast=True)
|
apache-2.0
|
darith27/wagtail
|
wagtail/wagtailadmin/utils.py
|
11
|
4020
|
from django.template.loader import render_to_string
from django.core.mail import send_mail as django_send_mail
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models import Q
from modelcluster.fields import ParentalKey
from wagtail.wagtailcore.models import Page, PageRevision, GroupPagePermission
from wagtail.wagtailusers.models import UserProfile
from wagtail.utils.compat import get_related_model
def get_object_usage(obj):
"Returns a queryset of pages that link to a particular object"
pages = Page.objects.none()
# get all the relation objects for obj
relations = type(obj)._meta.get_all_related_objects(
include_hidden=True,
include_proxy_eq=True
)
for relation in relations:
related_model = get_related_model(relation)
# if the relation is between obj and a page, get the page
if issubclass(related_model, Page):
pages |= Page.objects.filter(
id__in=related_model._base_manager.filter(**{
relation.field.name: obj.id
}).values_list('id', flat=True)
)
else:
# if the relation is between obj and an object that has a page as a
# property, return the page
for f in related_model._meta.fields:
if isinstance(f, ParentalKey) and issubclass(f.rel.to, Page):
pages |= Page.objects.filter(
id__in=related_model._base_manager.filter(
**{
relation.field.name: obj.id
}).values_list(f.attname, flat=True)
)
return pages
def users_with_page_permission(page, permission_type, include_superusers=True):
# Get user model
User = get_user_model()
# Find GroupPagePermission records of the given type that apply to this page or an ancestor
ancestors_and_self = list(page.get_ancestors()) + [page]
perm = GroupPagePermission.objects.filter(permission_type=permission_type, page__in=ancestors_and_self)
q = Q(groups__page_permissions=perm)
# Include superusers
if include_superusers:
q |= Q(is_superuser=True)
return User.objects.filter(is_active=True).filter(q).distinct()
def send_mail(email_subject, email_content, email_addresses, from_email=None):
if not from_email:
if hasattr(settings, 'WAGTAILADMIN_NOTIFICATION_FROM_EMAIL'):
from_email = settings.WAGTAILADMIN_NOTIFICATION_FROM_EMAIL
elif hasattr(settings, 'DEFAULT_FROM_EMAIL'):
from_email = settings.DEFAULT_FROM_EMAIL
else:
from_email = 'webmaster@localhost'
django_send_mail(email_subject, email_content, from_email, email_addresses)
def send_notification(page_revision_id, notification, excluded_user_id):
# Get revision
revision = PageRevision.objects.get(id=page_revision_id)
# Get list of recipients
if notification == 'submitted':
# Get list of publishers
recipients = users_with_page_permission(revision.page, 'publish')
elif notification in ['rejected', 'approved']:
# Get submitter
recipients = [revision.user]
else:
return
# Get list of email addresses
email_addresses = [
recipient.email for recipient in recipients
if recipient.email and recipient.id != excluded_user_id and getattr(UserProfile.get_for_user(recipient), notification + '_notifications')
]
# Return if there are no email addresses
if not email_addresses:
return
# Get email subject and content
template = 'wagtailadmin/notifications/' + notification + '.html'
rendered_template = render_to_string(template, dict(revision=revision, settings=settings)).split('\n')
email_subject = rendered_template[0]
email_content = '\n'.join(rendered_template[1:])
# Send email
send_mail(email_subject, email_content, email_addresses)
|
bsd-3-clause
|
Cindicator/CindicatorArbitrageBot
|
notify_users.py
|
1
|
2662
|
"""
Copyright 2017 Evgeniy Koltsov, Sergey Zhirnov.
This file is part of CindicatorArbitrageBot.
CindicatorArbitrageBot is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CindicatorArbitrageBot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CindicatorArbitrageBot. If not, see <http://www.gnu.org/licenses/>.
"""
"""This module contains method for notifying all users with specific text"""
import sys
import time
import telegram
from datetime import datetime
import messages
import mongo_queries as mq
import config.base as base_config
import config.local as local_config
def notify_users(bot, users, text, msg_num_limit=29, msg_time_limit=1):
"""
Send the given text to all users (at most 'msg_num_limit' messages are
sent per 'msg_time_limit' seconds)
Args:
:param bot: <telegram.bot.Bot> instance
:param users: <list> of dict with user's data
:param text: <string> that will send to users
:param msg_num_limit: <int>[optional=29] max number of messages per time window
:param msg_time_limit: <float>[optional=1] length of the time window in seconds
"""
# Number of messages sent in the current 'msg_time_limit' window
msg_num = 0
for user in users:
# A new time window starts when the message counter is zero
if msg_num == 0:
s_time = datetime.utcnow()
try:
bot.send_message(chat_id=int(user[base_config.CHAT_ID]), text=text)
except Exception as e:
print('chat_id: {}; error: {}'.format(user[base_config.CHAT_ID], str(e)))
finally:
msg_num += 1
# If 'msg_num_limit' messages were sent in less than 'msg_time_limit' seconds
if msg_num >= msg_num_limit and (datetime.utcnow() - s_time).total_seconds() < msg_time_limit:
time.sleep(msg_time_limit)
msg_num = 0
# If more than 'msg_time_limit' seconds have passed since the window started
elif (datetime.utcnow() - s_time).total_seconds() > msg_time_limit:
msg_num = 0
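# Editorial sketch (assumptions: a valid bot token and the user documents that
# mongo_queries.get_users() returns). With the defaults above this sends at
# most 29 messages per second, presumably to stay under Telegram's limits.
def _example_broadcast(token, text):
    bot = telegram.bot.Bot(token)
    notify_users(bot, mq.get_users(), text, msg_num_limit=29, msg_time_limit=1)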
if __name__ == '__main__':
if len(sys.argv) > 1:
notify_text = sys.argv[1]
else:
notify_text = messages.OFFLINE_TEXT
bot = telegram.bot.Bot(local_config.TOKEN)
notify_users(bot, mq.get_users(), notify_text)
|
gpl-3.0
|
back-to/streamlink
|
src/streamlink/plugins/zengatv.py
|
5
|
1456
|
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import useragents
from streamlink.stream import HLSStream
class ZengaTV(Plugin):
"""Streamlink Plugin for livestreams on zengatv.com"""
_url_re = re.compile(r"https?://(www\.)?zengatv\.com/\w+")
_id_re = re.compile(r"""id=(?P<q>["'])dvrid(?P=q)\svalue=(?P=q)(?P<id>[^"']+)(?P=q)""")
_id_2_re = re.compile(r"""LivePlayer\(.+["'](?P<id>D\d+)["']""")
api_url = "http://www.zengatv.com/changeResulation/"
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
headers = {
"User-Agent": useragents.FIREFOX,
"Referer": self.url,
}
res = self.session.http.get(self.url, headers=headers)
for id_re in (self._id_re, self._id_2_re):
m = id_re.search(res.text)
if not m:
continue
break
if not m:
self.logger.error("No video id found")
return
dvr_id = m.group("id")
self.logger.debug("Found video id: {0}".format(dvr_id))
data = {"feed": "hd", "dvrId": dvr_id}
res = self.session.http.post(self.api_url, headers=headers, data=data)
if res.status_code == 200:
for s in HLSStream.parse_variant_playlist(self.session, res.text, headers=headers).items():
yield s
__plugin__ = ZengaTV
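if __name__ == "__main__":
    # Editorial sanity check (not part of the plugin); the channel path in the
    # first URL is invented, any http(s)://(www.)zengatv.com/<word> path matches.
    print(ZengaTV.can_handle_url("http://www.zengatv.com/livetv"))  # True
    print(ZengaTV.can_handle_url("http://example.com/livetv"))      # False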
|
bsd-2-clause
|
adsworth/ldp3
|
ldp/trip/views.py
|
1
|
6387
|
from datetime import datetime, date, time
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView
from skater.models import privacy_exclude_for_user
from trip.forms.forms import TripForm
from trip.models import Trip
class SortMixin(object):
"""
View mixin which provides sorting for ListView.
"""
default_sort_param = None
param_name_sort = 'sort_by'
def sort_queryset(self, qs, sort_by, descending):
return qs
def get_default_sort_param(self):
if self.default_sort_param is None:
raise ImproperlyConfigured(
"'SortMixin' requires the 'default_sort_param' attribute "
"to be set.")
return self.default_sort_param
def get_sort_param(self):
return self.request.GET.get(self.param_name_sort, self.get_default_sort_param())
def get_sort_options(self):
sort_by = self.get_sort_param()
descending = False
if sort_by[0] == '-':
sort_by = sort_by[1:]
descending = True
return (sort_by, descending)
def get_queryset(self):
return self.sort_queryset(
super(SortMixin, self).get_queryset(),
*self.get_sort_options())
def get_context_data(self, *args, **kwargs):
context = super(SortMixin, self).get_context_data(*args, **kwargs)
context.update({
'sort_by': self.get_sort_param(),
})
return context
class FilterMixin(object):
"""
View mixin which provides filtering for ListView.
"""
filter_url_kwarg = 'filter'
default_filter_param = None
def get_default_filter_param(self):
if self.default_filter_param is None:
raise ImproperlyConfigured(
"'FilterMixin' requires the 'default_filter_param' attribute "
"to be set.")
return self.default_filter_param
def filter_queryset(self, qs, filter_param):
"""
Filter the queryset `qs`, given the selected `filter_param`. Default
implementation does no filtering at all.
"""
return qs
def get_filter_param(self):
return self.kwargs.get(self.filter_url_kwarg,
self.get_default_filter_param())
def get_queryset(self):
return self.filter_queryset(
super(FilterMixin, self).get_queryset(),
self.get_filter_param())
def get_context_data(self, *args, **kwargs):
context = super(FilterMixin, self).get_context_data(*args, **kwargs)
context.update({
'filter': self.get_filter_param(),
})
return context
class TripListView(SortMixin, ListView):
default_sort_param = ('-start')
param_name_sort = 's'
model = Trip
paginate_by = 20
def sort_queryset(self, qs, sort_by, descending):
if sort_by == 'distance':
qs = qs.order_by('distance')
elif sort_by == 'skater':
qs = qs.order_by('skater')
elif sort_by == 'start':
qs = qs.order_by('start_utc')
elif sort_by == 'duration':
qs = qs.order_by('duration')
elif sort_by == 'avg_speed':
qs = qs.order_by('avg_speed')
if descending == True:
qs = qs.reverse()
return qs
def get_queryset(self):
return super(TripListView, self).get_queryset().exclude(skater__profile__privacy__in=privacy_exclude_for_user(self.request.user))
class SkaterTripListView(TripListView):
def dispatch(self, *args, **kwargs):
self.skater = get_object_or_404(get_user_model(), username=kwargs['username'])
if self.skater.profile.privacy == 'registered' and \
self.request.user.is_authenticated() == False:
raise PermissionDenied
elif self.skater.profile.privacy == 'closed' and \
            self.skater != self.request.user:
raise PermissionDenied
return super(SkaterTripListView, self).dispatch(*args, **kwargs)
def get_template_names(self):
return ['trip/skater_trip_list.html',]
def get_queryset(self):
return super(SkaterTripListView, self).get_queryset().filter(skater=self.skater)
def get_context_data(self, *args, **kwargs):
context = super(SkaterTripListView, self).get_context_data(*args, **kwargs)
context.update({
'skater': self.skater,
})
return context
class TripCreateView(CreateView):
model = Trip
context_object_name = 'trip'
form_class = TripForm
# fields = ['name']
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(TripCreateView, self).dispatch(*args, **kwargs)
def get_form_kwargs(self):
_kwargs = super(TripCreateView, self).get_form_kwargs()
if not _kwargs['instance']:
_kwargs['instance'] = Trip(skater=self.request.user)
return _kwargs
def get_success_url(self):
return super(TripCreateView, self).get_success_url()
def get_initial(self):
return {'start': datetime.combine(date.today(), time()),
'end': datetime.combine(date.today(), time())
}
class TripUpdateView(UpdateView):
model = Trip
context_object_name = 'trip'
form_class = TripForm
def get_success_url(self):
return super(TripUpdateView, self).get_success_url()
class TripDetailView(DetailView):
model = Trip
context_object_name = 'trip'
def get_object(self, *args, **kwargs):
_object = super(TripDetailView, self).get_object(*args, **kwargs)
if _object.skater.profile.privacy == 'registered' and \
self.request.user.is_authenticated() == False:
raise PermissionDenied
elif _object.skater.profile.privacy == 'closed' and \
            _object.skater != self.request.user:
raise PermissionDenied
return _object
|
mit
|
drexly/openhgsenti
|
lib/django/contrib/gis/db/backends/mysql/operations.py
|
328
|
2746
|
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.db.backends.mysql.operations import DatabaseOperations
from django.utils.functional import cached_property
class MySQLOperations(BaseSpatialOperations, DatabaseOperations):
mysql = True
name = 'mysql'
select = 'AsText(%s)'
from_wkb = 'GeomFromWKB'
from_text = 'GeomFromText'
Adapter = WKTAdapter
Adaptor = Adapter # Backwards-compatibility alias.
gis_operators = {
'bbcontains': SpatialOperator(func='MBRContains'), # For consistency w/PostGIS API
'bboverlaps': SpatialOperator(func='MBROverlaps'), # .. ..
'contained': SpatialOperator(func='MBRWithin'), # .. ..
'contains': SpatialOperator(func='MBRContains'),
'disjoint': SpatialOperator(func='MBRDisjoint'),
'equals': SpatialOperator(func='MBREqual'),
'exact': SpatialOperator(func='MBREqual'),
'intersects': SpatialOperator(func='MBRIntersects'),
'overlaps': SpatialOperator(func='MBROverlaps'),
'same_as': SpatialOperator(func='MBREqual'),
'touches': SpatialOperator(func='MBRTouches'),
'within': SpatialOperator(func='MBRWithin'),
}
function_names = {
'Distance': 'ST_Distance',
'Length': 'GLength',
'Union': 'ST_Union',
}
disallowed_aggregates = (
aggregates.Collect, aggregates.Extent, aggregates.Extent3D,
aggregates.MakeLine, aggregates.Union,
)
@cached_property
def unsupported_functions(self):
unsupported = {
'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG', 'BoundingCircle',
'Difference', 'ForceRHR', 'GeoHash', 'Intersection', 'MemSize',
'Perimeter', 'PointOnSurface', 'Reverse', 'Scale', 'SnapToGrid',
'SymDifference', 'Transform', 'Translate',
}
if self.connection.mysql_version < (5, 6, 1):
unsupported.update({'Distance', 'Union'})
return unsupported
def geo_db_type(self, f):
return f.geom_type
def get_geom_placeholder(self, f, value, compiler):
"""
The placeholder here has to include MySQL's WKT constructor. Because
MySQL does not support spatial transformations, there is no need to
modify the placeholder based on the contents of the given value.
"""
if hasattr(value, 'as_sql'):
placeholder, _ = compiler.compile(value)
else:
placeholder = '%s(%%s)' % self.from_text
return placeholder
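# Illustrative note (not part of the original module): for a literal geometry
# value the placeholder above becomes 'GeomFromText(%s)', so a GeoDjango filter
# such as the hypothetical Zipcode.objects.filter(poly__bbcontains=pnt) is
# compiled through the gis_operators table into SQL roughly of the form
# MBRContains(`poly`, GeomFromText(%s)), with the WKT of pnt bound as the
# query parameter.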
|
apache-2.0
|
sbellem/django
|
tests/model_inheritance_regress/models.py
|
243
|
5863
|
from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
class Meta:
ordering = ('name',)
def __str__(self):
return "%s the place" % self.name
@python_2_unicode_compatible
class Restaurant(Place):
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return "%s the restaurant" % self.name
@python_2_unicode_compatible
class ItalianRestaurant(Restaurant):
serves_gnocchi = models.BooleanField(default=False)
def __str__(self):
return "%s the italian restaurant" % self.name
@python_2_unicode_compatible
class ParkingLot(Place):
# An explicit link to the parent (we can control the attribute name).
parent = models.OneToOneField(Place, models.CASCADE, primary_key=True, parent_link=True)
capacity = models.IntegerField()
def __str__(self):
return "%s the parking lot" % self.name
class ParkingLot2(Place):
# In lieu of any other connector, an existing OneToOneField will be
# promoted to the primary key.
parent = models.OneToOneField(Place, models.CASCADE)
class ParkingLot3(Place):
# The parent_link connector need not be the pk on the model.
primary_key = models.AutoField(primary_key=True)
parent = models.OneToOneField(Place, models.CASCADE, parent_link=True)
class ParkingLot4(models.Model):
# Test parent_link connector can be discovered in abstract classes.
parent = models.OneToOneField(Place, models.CASCADE, parent_link=True)
class Meta:
abstract = True
class ParkingLot4A(ParkingLot4, Place):
pass
class ParkingLot4B(Place, ParkingLot4):
pass
@python_2_unicode_compatible
class Supplier(models.Model):
name = models.CharField(max_length=50)
restaurant = models.ForeignKey(Restaurant, models.CASCADE)
def __str__(self):
return self.name
class Wholesaler(Supplier):
retailer = models.ForeignKey(Supplier, models.CASCADE, related_name='wholesale_supplier')
class Parent(models.Model):
created = models.DateTimeField(default=datetime.datetime.now)
class Child(Parent):
name = models.CharField(max_length=10)
class SelfRefParent(models.Model):
parent_data = models.IntegerField()
self_data = models.ForeignKey('self', models.SET_NULL, null=True)
class SelfRefChild(SelfRefParent):
child_data = models.IntegerField()
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
ordering = ('-pub_date', 'headline')
def __str__(self):
return self.headline
class ArticleWithAuthor(Article):
author = models.CharField(max_length=100)
class M2MBase(models.Model):
articles = models.ManyToManyField(Article)
class M2MChild(M2MBase):
name = models.CharField(max_length=50)
class Evaluation(Article):
quality = models.IntegerField()
class Meta:
abstract = True
class QualityControl(Evaluation):
assignee = models.CharField(max_length=50)
@python_2_unicode_compatible
class BaseM(models.Model):
base_name = models.CharField(max_length=100)
def __str__(self):
return self.base_name
@python_2_unicode_compatible
class DerivedM(BaseM):
customPK = models.IntegerField(primary_key=True)
derived_name = models.CharField(max_length=100)
def __str__(self):
return "PK = %d, base_name = %s, derived_name = %s" % (
self.customPK, self.base_name, self.derived_name)
class AuditBase(models.Model):
planned_date = models.DateField()
class Meta:
abstract = True
verbose_name_plural = 'Audits'
class CertificationAudit(AuditBase):
class Meta(AuditBase.Meta):
abstract = True
class InternalCertificationAudit(CertificationAudit):
auditing_dept = models.CharField(max_length=20)
# Check that abstract classes don't get m2m tables autocreated.
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class AbstractEvent(models.Model):
name = models.CharField(max_length=100)
attendees = models.ManyToManyField(Person, related_name="%(class)s_set")
class Meta:
abstract = True
ordering = ('name',)
def __str__(self):
return self.name
class BirthdayParty(AbstractEvent):
pass
class BachelorParty(AbstractEvent):
pass
class MessyBachelorParty(BachelorParty):
pass
# Check concrete -> abstract -> concrete inheritance
class SearchableLocation(models.Model):
keywords = models.CharField(max_length=256)
class Station(SearchableLocation):
name = models.CharField(max_length=128)
class Meta:
abstract = True
class BusStation(Station):
bus_routes = models.CommaSeparatedIntegerField(max_length=128)
inbound = models.BooleanField(default=False)
class TrainStation(Station):
zone = models.IntegerField()
class User(models.Model):
username = models.CharField(max_length=30, unique=True)
class Profile(User):
profile_id = models.AutoField(primary_key=True)
extra = models.CharField(max_length=30, blank=True)
# Check concrete + concrete -> concrete -> concrete
class Politician(models.Model):
politician_id = models.AutoField(primary_key=True)
title = models.CharField(max_length=50)
class Congressman(Person, Politician):
state = models.CharField(max_length=2)
class Senator(Congressman):
pass
|
bsd-3-clause
|
wpoa/wiki-imports
|
lib/python2.7/site-packages/pip/commands/zip.py
|
393
|
14821
|
import sys
import re
import fnmatch
import os
import shutil
import zipfile
from pip.util import display_path, backup_dir, rmtree
from pip.log import logger
from pip.exceptions import InstallationError
from pip.basecommand import Command
class ZipCommand(Command):
"""Zip individual packages."""
name = 'zip'
usage = """
%prog [options] <package> ..."""
summary = 'DEPRECATED. Zip individual packages.'
def __init__(self, *args, **kw):
super(ZipCommand, self).__init__(*args, **kw)
if self.name == 'zip':
self.cmd_opts.add_option(
'--unzip',
action='store_true',
dest='unzip',
help='Unzip (rather than zip) a package.')
else:
self.cmd_opts.add_option(
'--zip',
action='store_false',
dest='unzip',
default=True,
help='Zip (rather than unzip) a package.')
self.cmd_opts.add_option(
'--no-pyc',
action='store_true',
dest='no_pyc',
help='Do not include .pyc files in zip files (useful on Google App Engine).')
self.cmd_opts.add_option(
'-l', '--list',
action='store_true',
dest='list',
help='List the packages available, and their zip status.')
self.cmd_opts.add_option(
'--sort-files',
action='store_true',
dest='sort_files',
help='With --list, sort packages according to how many files they contain.')
self.cmd_opts.add_option(
'--path',
action='append',
dest='paths',
help='Restrict operations to the given paths (may include wildcards).')
self.cmd_opts.add_option(
'-n', '--simulate',
action='store_true',
help='Do not actually perform the zip/unzip operation.')
self.parser.insert_option_group(0, self.cmd_opts)
def paths(self):
"""All the entries of sys.path, possibly restricted by --path"""
if not self.select_paths:
return sys.path
result = []
match_any = set()
for path in sys.path:
path = os.path.normcase(os.path.abspath(path))
for match in self.select_paths:
match = os.path.normcase(os.path.abspath(match))
if '*' in match:
if re.search(fnmatch.translate(match + '*'), path):
result.append(path)
match_any.add(match)
break
else:
if path.startswith(match):
result.append(path)
match_any.add(match)
break
else:
logger.debug("Skipping path %s because it doesn't match %s"
% (path, ', '.join(self.select_paths)))
for match in self.select_paths:
if match not in match_any and '*' not in match:
result.append(match)
logger.debug("Adding path %s because it doesn't match "
"anything already on sys.path" % match)
return result
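    # Illustrative invocation (assumed example, not from the original source):
    # restrict the listing to a single site-packages directory, wildcards allowed:
    #
    #   pip zip --list --path '/usr/lib/python2.*/site-packages'
    #
    # Paths passed via --path populate self.select_paths and are matched above.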
def run(self, options, args):
        logger.deprecated('1.7', "DEPRECATION: 'pip zip' and 'pip unzip' are deprecated, and will be removed in a future release.")
self.select_paths = options.paths
self.simulate = options.simulate
if options.list:
return self.list(options, args)
if not args:
raise InstallationError(
'You must give at least one package to zip or unzip')
packages = []
for arg in args:
module_name, filename = self.find_package(arg)
if options.unzip and os.path.isdir(filename):
raise InstallationError(
'The module %s (in %s) is not a zip file; cannot be unzipped'
% (module_name, filename))
elif not options.unzip and not os.path.isdir(filename):
raise InstallationError(
'The module %s (in %s) is not a directory; cannot be zipped'
% (module_name, filename))
packages.append((module_name, filename))
last_status = None
for module_name, filename in packages:
if options.unzip:
last_status = self.unzip_package(module_name, filename)
else:
last_status = self.zip_package(module_name, filename, options.no_pyc)
return last_status
def unzip_package(self, module_name, filename):
zip_filename = os.path.dirname(filename)
        if not (os.path.isfile(zip_filename) and zipfile.is_zipfile(zip_filename)):
raise InstallationError(
'Module %s (in %s) isn\'t located in a zip file in %s'
% (module_name, filename, zip_filename))
package_path = os.path.dirname(zip_filename)
if not package_path in self.paths():
logger.warn(
'Unpacking %s into %s, but %s is not on sys.path'
% (display_path(zip_filename), display_path(package_path),
display_path(package_path)))
logger.notify('Unzipping %s (in %s)' % (module_name, display_path(zip_filename)))
if self.simulate:
logger.notify('Skipping remaining operations because of --simulate')
return
logger.indent += 2
try:
## FIXME: this should be undoable:
zip = zipfile.ZipFile(zip_filename)
to_save = []
for info in zip.infolist():
name = info.filename
if name.startswith(module_name + os.path.sep):
content = zip.read(name)
dest = os.path.join(package_path, name)
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
if not content and dest.endswith(os.path.sep):
if not os.path.exists(dest):
os.makedirs(dest)
else:
f = open(dest, 'wb')
f.write(content)
f.close()
else:
to_save.append((name, zip.read(name)))
zip.close()
if not to_save:
logger.info('Removing now-empty zip file %s' % display_path(zip_filename))
os.unlink(zip_filename)
self.remove_filename_from_pth(zip_filename)
else:
logger.info('Removing entries in %s/ from zip file %s' % (module_name, display_path(zip_filename)))
zip = zipfile.ZipFile(zip_filename, 'w')
for name, content in to_save:
zip.writestr(name, content)
zip.close()
finally:
logger.indent -= 2
def zip_package(self, module_name, filename, no_pyc):
orig_filename = filename
logger.notify('Zip %s (in %s)' % (module_name, display_path(filename)))
logger.indent += 2
if filename.endswith('.egg'):
dest_filename = filename
else:
dest_filename = filename + '.zip'
try:
## FIXME: I think this needs to be undoable:
if filename == dest_filename:
filename = backup_dir(orig_filename)
logger.notify('Moving %s aside to %s' % (orig_filename, filename))
if not self.simulate:
shutil.move(orig_filename, filename)
try:
logger.info('Creating zip file in %s' % display_path(dest_filename))
if not self.simulate:
zip = zipfile.ZipFile(dest_filename, 'w')
zip.writestr(module_name + '/', '')
for dirpath, dirnames, filenames in os.walk(filename):
if no_pyc:
filenames = [f for f in filenames
if not f.lower().endswith('.pyc')]
for fns, is_dir in [(dirnames, True), (filenames, False)]:
for fn in fns:
full = os.path.join(dirpath, fn)
dest = os.path.join(module_name, dirpath[len(filename):].lstrip(os.path.sep), fn)
if is_dir:
zip.writestr(dest + '/', '')
else:
zip.write(full, dest)
zip.close()
logger.info('Removing old directory %s' % display_path(filename))
if not self.simulate:
rmtree(filename)
except:
## FIXME: need to do an undo here
raise
## FIXME: should also be undone:
self.add_filename_to_pth(dest_filename)
finally:
logger.indent -= 2
def remove_filename_from_pth(self, filename):
for pth in self.pth_files():
f = open(pth, 'r')
lines = f.readlines()
f.close()
new_lines = [
l for l in lines if l.strip() != filename]
if lines != new_lines:
logger.info('Removing reference to %s from .pth file %s'
% (display_path(filename), display_path(pth)))
if not [line for line in new_lines if line]:
logger.info('%s file would be empty: deleting' % display_path(pth))
if not self.simulate:
os.unlink(pth)
else:
if not self.simulate:
f = open(pth, 'wb')
f.writelines(new_lines)
f.close()
return
logger.warn('Cannot find a reference to %s in any .pth file' % display_path(filename))
def add_filename_to_pth(self, filename):
path = os.path.dirname(filename)
dest = filename + '.pth'
if path not in self.paths():
logger.warn('Adding .pth file %s, but it is not on sys.path' % display_path(dest))
if not self.simulate:
if os.path.exists(dest):
f = open(dest)
lines = f.readlines()
f.close()
if lines and not lines[-1].endswith('\n'):
lines[-1] += '\n'
lines.append(filename + '\n')
else:
lines = [filename + '\n']
f = open(dest, 'wb')
f.writelines(lines)
f.close()
def pth_files(self):
for path in self.paths():
if not os.path.exists(path) or not os.path.isdir(path):
continue
for filename in os.listdir(path):
if filename.endswith('.pth'):
yield os.path.join(path, filename)
def find_package(self, package):
for path in self.paths():
full = os.path.join(path, package)
if os.path.exists(full):
return package, full
if not os.path.isdir(path) and zipfile.is_zipfile(path):
zip = zipfile.ZipFile(path, 'r')
try:
zip.read(os.path.join(package, '__init__.py'))
except KeyError:
pass
else:
zip.close()
return package, full
zip.close()
## FIXME: need special error for package.py case:
raise InstallationError(
'No package with the name %s found' % package)
def list(self, options, args):
if args:
raise InstallationError(
'You cannot give an argument with --list')
for path in sorted(self.paths()):
if not os.path.exists(path):
continue
basename = os.path.basename(path.rstrip(os.path.sep))
if os.path.isfile(path) and zipfile.is_zipfile(path):
if os.path.dirname(path) not in self.paths():
logger.notify('Zipped egg: %s' % display_path(path))
continue
if (basename != 'site-packages' and basename != 'dist-packages'
and not path.replace('\\', '/').endswith('lib/python')):
continue
logger.notify('In %s:' % display_path(path))
logger.indent += 2
zipped = []
unzipped = []
try:
for filename in sorted(os.listdir(path)):
ext = os.path.splitext(filename)[1].lower()
if ext in ('.pth', '.egg-info', '.egg-link'):
continue
if ext == '.py':
logger.info('Not displaying %s: not a package' % display_path(filename))
continue
full = os.path.join(path, filename)
if os.path.isdir(full):
unzipped.append((filename, self.count_package(full)))
elif zipfile.is_zipfile(full):
zipped.append(filename)
else:
logger.info('Unknown file: %s' % display_path(filename))
if zipped:
logger.notify('Zipped packages:')
logger.indent += 2
try:
for filename in zipped:
logger.notify(filename)
finally:
logger.indent -= 2
else:
logger.notify('No zipped packages.')
if unzipped:
if options.sort_files:
unzipped.sort(key=lambda x: -x[1])
logger.notify('Unzipped packages:')
logger.indent += 2
try:
for filename, count in unzipped:
logger.notify('%s (%i files)' % (filename, count))
finally:
logger.indent -= 2
else:
logger.notify('No unzipped packages.')
finally:
logger.indent -= 2
def count_package(self, path):
total = 0
for dirpath, dirnames, filenames in os.walk(path):
filenames = [f for f in filenames
if not f.lower().endswith('.pyc')]
total += len(filenames)
return total
|
gpl-3.0
|
XiaominZhang/Impala
|
tests/metadata/test_recover_partitions.py
|
13
|
15294
|
# Copyright (c) 2015 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Impala tests for ALTER TABLE RECOVER PARTITIONS statement
import pytest
from tests.common.test_dimensions import ALL_NODES_ONLY
from tests.common.impala_test_suite import *
from tests.common.skip import SkipIfS3
from tests.util.filesystem_utils import WAREHOUSE, IS_DEFAULT_FS
# Validates ALTER TABLE RECOVER PARTITIONS statement
class TestRecoverPartitions(ImpalaTestSuite):
TEST_DB = "recover_parts_db"
TEST_TBL = "alter_recover_partitions"
TEST_TBL2 = "alter_recover_partitions_all_types"
BASE_DIR = 'test-warehouse/%s.db/%s/' % (TEST_DB, TEST_TBL)
BASE_DIR2 = 'test-warehouse/%s.db/%s/' % (TEST_DB, TEST_TBL2)
DEF_NULL_PART_KEY = "__HIVE_DEFAULT_PARTITION__"
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestRecoverPartitions, cls).add_test_dimensions()
sync_ddl_opts = [0, 1]
if cls.exploration_strategy() != 'exhaustive':
# Only run with sync_ddl on exhaustive since it increases test runtime.
sync_ddl_opts = [0]
cls.TestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=ALL_NODES_ONLY,
disable_codegen_options=[False],
batch_sizes=[0],
sync_ddl=sync_ddl_opts))
# There is no reason to run these tests using all dimensions.
cls.TestMatrix.add_dimension(create_uncompressed_text_dimension(cls.get_workload()))
def setup_method(self, method):
self.cleanup_db(self.TEST_DB)
self.client.execute("create database {0} location '{1}/{0}.db'".format(self.TEST_DB,
WAREHOUSE))
self.client.execute("use %s" % self.TEST_DB)
def teardown_method(self, method):
self.cleanup_db(self.TEST_DB)
@SkipIfS3.insert
@pytest.mark.execute_serially
def test_recover_partitions(self, vector):
"""Test that RECOVER PARTITIONS correctly discovers new partitions added externally
by the hdfs client.
"""
part_name = "p2"
leaf_dir = "i=0001/p=%s/" % part_name
malformed_dir = "i=fish/p=%s/" % part_name
file_path = "test"
inserted_value = "2"
null_dir = "i=1/p=%s/" % self.DEF_NULL_PART_KEY
null_inserted_value = "4"
self.execute_query_expect_success(self.client,
"CREATE TABLE %s (c int) PARTITIONED BY (i int, p string)" % (self.TEST_TBL))
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(i=1, p='p1') VALUES(1)" % (self.TEST_TBL))
# Create a path for a new partition using hdfs client and add a file with some values.
# Test that the partition can be recovered and that the inserted data are accessible.
self.hdfs_client.make_dir(self.BASE_DIR + leaf_dir)
self.hdfs_client.create_file(self.BASE_DIR + leaf_dir + file_path, inserted_value)
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % (self.TEST_TBL))
assert self.has_value(part_name, result.data) == False
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % (self.TEST_TBL))
    assert self.has_value(part_name, result.data), \
        "ALTER TABLE %s RECOVER PARTITIONS failed." % (self.TEST_TBL)
result = self.execute_query_expect_success(self.client,
"select c from %s" % self.TEST_TBL)
    assert self.has_value(inserted_value, result.data), \
        "Failed to load tables after ALTER TABLE %s RECOVER PARTITIONS." \
        % (self.TEST_TBL)
# Test that invalid partition values are ignored during partition recovery.
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % (self.TEST_TBL))
old_length = len(result.data)
self.hdfs_client.make_dir(self.BASE_DIR + malformed_dir)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % (self.TEST_TBL))
    assert len(result.data) == old_length, \
        "ALTER TABLE %s RECOVER PARTITIONS failed to handle invalid partition values." \
        % (self.TEST_TBL)
# Create a directory whose subdirectory names contain __HIVE_DEFAULT_PARTITION__
    # and check that it is recovered as a NULL partition.
self.hdfs_client.make_dir(self.BASE_DIR + null_dir)
self.hdfs_client.create_file(self.BASE_DIR + null_dir + file_path, null_inserted_value)
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % (self.TEST_TBL))
assert self.has_value(self.DEF_NULL_PART_KEY, result.data) == False
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % (self.TEST_TBL))
assert (self.has_value("NULL", result.data) == True,
"ALTER TABLE %s RECOVER PARTITIONS failed to handle null partition values."
% (self.TEST_TBL))
result = self.execute_query_expect_success(self.client,
"select c from %s" % self.TEST_TBL)
assert self.has_value(null_inserted_value, result.data) == True
@SkipIfS3.insert
@pytest.mark.execute_serially
def test_nondefault_location_partitions(self, vector):
"""If the location of data files in one partition is changed, test that data files
in the default location will not be loaded after partition recovery."""
file_path = "test"
leaf_dir = "i=1/p=p3/"
inserted_value = "4"
self.execute_query_expect_success(self.client,
"CREATE TABLE %s (c int) PARTITIONED BY (i int, p string)" % (self.TEST_TBL))
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(i=1, p='p1') VALUES(1)" % (self.TEST_TBL))
self.execute_query_expect_success(self.client,
"ALTER TABLE %s ADD PARTITION(i=1, p='p3')" % (self.TEST_TBL))
self.execute_query_expect_success(self.client,
"ALTER TABLE %s PARTITION (i=1, p='p3') SET LOCATION '%s/%s.db/tmp' "
% (self.TEST_TBL, WAREHOUSE, self.TEST_DB))
self.hdfs_client.delete_file_dir(self.BASE_DIR + leaf_dir, recursive=True)
self.hdfs_client.make_dir(self.BASE_DIR + leaf_dir);
self.hdfs_client.create_file(self.BASE_DIR + leaf_dir + file_path, inserted_value)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
# Ensure that no duplicate partitions are recovered.
result = self.execute_query_expect_success(self.client,
"select c from %s" % self.TEST_TBL)
    assert not self.has_value(inserted_value, result.data), \
        "ALTER TABLE %s RECOVER PARTITIONS failed to handle non-default partition location." \
        % (self.TEST_TBL)
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(i=1, p='p3') VALUES(4)" % (self.TEST_TBL))
result = self.execute_query_expect_success(self.client,
"select c from %s" % self.TEST_TBL)
assert self.has_value(inserted_value, result.data) == True
@SkipIfS3.insert
@pytest.mark.execute_serially
def test_duplicate_partitions(self, vector):
"""Test that RECOVER PARTITIONS does not recover equivalent partitions. Two partitions
are considered equivalent if they correspond to distinct paths but can be converted
to the same partition key values (e.g. "i=0005/p=p2" and "i=05/p=p2")."""
same_value_dir1 = "i=0004/p=p2/"
same_value_dir2 = "i=000004/p=p2/"
file_path = "test"
self.execute_query_expect_success(self.client,
"CREATE TABLE %s (c int) PARTITIONED BY (i int, p string)" % (self.TEST_TBL))
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(i=1, p='p1') VALUES(1)" % (self.TEST_TBL))
# Create a partition with path "/i=1/p=p4".
# Create a path "/i=0001/p=p4" using hdfs client, and add a file with some values.
# Test that no new partition will be recovered and the inserted data are not accessible.
leaf_dir = "i=0001/p=p4/"
inserted_value = "5"
self.execute_query_expect_success(self.client,
"ALTER TABLE %s ADD PARTITION(i=1, p='p4')" % (self.TEST_TBL))
self.hdfs_client.make_dir(self.BASE_DIR + leaf_dir);
self.hdfs_client.create_file(self.BASE_DIR + leaf_dir + file_path, inserted_value)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
result = self.execute_query_expect_success(self.client,
"select c from %s" % self.TEST_TBL)
    assert not self.has_value(inserted_value, result.data), \
        "ALTER TABLE %s RECOVER PARTITIONS failed to handle duplicate partition key values." \
        % (self.TEST_TBL)
# Create two paths '/i=0004/p=p2/' and "i=000004/p=p2/" using hdfs client.
# Test that only one partition will be added.
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % (self.TEST_TBL))
old_length = len(result.data)
self.hdfs_client.make_dir(self.BASE_DIR + same_value_dir1)
self.hdfs_client.make_dir(self.BASE_DIR + same_value_dir2)
# Only one partition will be added.
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % (self.TEST_TBL))
    assert (old_length + 1) == len(result.data), \
        "ALTER TABLE %s RECOVER PARTITIONS failed to handle duplicate partition key values." \
        % (self.TEST_TBL)
@SkipIfS3.insert
@pytest.mark.execute_serially
def test_post_invalidate(self, vector):
"""Test that RECOVER PARTITIONS works correctly after invalidate."""
leaf_dir = "i=002/p=p2/"
file_path = "test"
inserted_value = "2"
self.execute_query_expect_success(self.client,
"CREATE TABLE %s (c int) PARTITIONED BY (i int, p string)" % (self.TEST_TBL))
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(i=1, p='p1') VALUES(1)" % (self.TEST_TBL))
# Test that the recovered partitions are properly stored in Hive MetaStore.
# Invalidate the table metadata and then check if the recovered partitions
# are accessible.
self.hdfs_client.make_dir(self.BASE_DIR + leaf_dir);
self.hdfs_client.create_file(self.BASE_DIR + leaf_dir + file_path, inserted_value)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL))
result = self.execute_query_expect_success(self.client,
"select c from %s" % self.TEST_TBL)
assert self.has_value(inserted_value, result.data) == True
self.client.execute("INVALIDATE METADATA %s" % (self.TEST_TBL))
result = self.execute_query_expect_success(self.client,
"select c from %s" % self.TEST_TBL)
    assert self.has_value(inserted_value, result.data), \
        "INVALIDATE can't work on partitions recovered by ALTER TABLE %s RECOVER PARTITIONS." \
        % (self.TEST_TBL)
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(i=002, p='p2') VALUES(4)" % (self.TEST_TBL))
result = self.execute_query_expect_success(self.client,
"select c from %s" % self.TEST_TBL)
assert self.has_value('4', result.data) == True
@SkipIfS3.insert
@pytest.mark.execute_serially
def test_support_all_types(self, vector):
"""Test that RECOVER PARTITIONS works correctly on all supported data types."""
normal_values = ["a=1", "b=128", "c=32768", "d=2147483648", "e=11.11",
"f=22.22", "g=33.33", "j=tchar", "k=tvchar", "s=recover"]
malformed_values = ["a=a", "b=b", "c=c", "d=d", "e=e", "f=f", "g=g"]
overflow_values = ["a=128", "b=-32769", "c=-2147483649", "d=9223372036854775808",
"e=11.11111111111111111111111111111111111111111111111111111",
"f=3.40282346638528860e+39", "g=1.79769313486231570e+309"]
self.execute_query_expect_success(self.client,
"CREATE TABLE %s (i INT) PARTITIONED BY (a TINYINT, b SMALLINT, c INT, d BIGINT,"
" e DECIMAL(4,2), f FLOAT, g DOUBLE, j CHAR(5), k VARCHAR(6), s STRING)"
% (self.TEST_TBL2))
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(a=1, b=2, c=3, d=4, e=55.55, f=6.6, g=7.7, "
"j=cast('j' as CHAR(5)), k=cast('k' as VARCHAR(6)), s='s') VALUES(1)"
% (self.TEST_TBL2))
# Test valid partition values.
normal_dir = ""
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % (self.TEST_TBL2))
old_length = len(result.data)
normal_dir = '/'.join(normal_values)
self.hdfs_client.make_dir(self.BASE_DIR2 + normal_dir)
# One partition will be added.
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL2))
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % (self.TEST_TBL2))
    assert len(result.data) == (old_length + 1), \
        "ALTER TABLE %s RECOVER PARTITIONS failed to handle some data types." \
        % (self.TEST_TBL)
# Test malformed partition values.
self.check_invalid_partition_values(normal_values, malformed_values)
# Test overflow partition values.
self.check_invalid_partition_values(normal_values, overflow_values)
def check_invalid_partition_values(self, normal_values, invalid_values):
""""Check that RECOVER PARTITIONS ignores partitions with invalid partition values."""
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % (self.TEST_TBL2))
old_length = len(result.data)
for i in range(len(invalid_values)):
invalid_dir = ""
for j in range(len(normal_values)):
if i != j:
invalid_dir += (normal_values[j] + "/")
else:
invalid_dir += (invalid_values[j] + "/")
self.hdfs_client.make_dir(self.BASE_DIR2 + invalid_dir)
# No partition will be added.
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % (self.TEST_TBL2))
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % (self.TEST_TBL2))
    assert len(result.data) == old_length, \
        "ALTER TABLE %s RECOVER PARTITIONS failed to handle invalid partition key values." \
        % (self.TEST_TBL)
def has_value(self, value, lines):
"""Check if lines contain value."""
return any([line.find(value) != -1 for line in lines])
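  # Illustrative end-to-end flow exercised by these tests (i=2/p=p9 is an
  # example partition; the path follows BASE_DIR above):
  #
  #   hdfs dfs -mkdir -p /test-warehouse/recover_parts_db.db/alter_recover_partitions/i=2/p=p9
  #   impala-shell -q "ALTER TABLE recover_parts_db.alter_recover_partitions RECOVER PARTITIONS"
  #
  # after which SHOW PARTITIONS lists the externally created partition.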
|
apache-2.0
|
trentinfrederick/name-panel
|
addon/function/preferences/options.py
|
3
|
12582
|
# imports
from . import generate
from ...defaults import defaults
# main
def main(context):
# panel
panel(context)
# properties
properties(context)
# shared
shared(context)
# auto
auto(context)
# batch
batch(context)
# copy
copy(context)
# generate
generate.main(defaults)
# panel
def panel(context):
# option
option = context.scene.NamePanel
# default
default = defaults['name panel']
# options
default['location'] = option.location
default['pin active object'] = option.pinActiveObject
default['pin active bone'] = option.pinActiveObject
default['hide find'] = option.hideFind
default['hide replace'] = option.hideReplace
default['filters'] = option.filters
default['shortcuts'] = option.shortcuts
default['display names'] = option.displayNames
default['search'] = option.search
default['clear search'] = option.clearSearch
default['regex'] = option.regex
default['mode'] = option.mode
default['groups'] = option.groups
default['action'] = option.action
default['grease pencil'] = option.greasePencil
default['constraints'] = option.constraints
default['modifiers'] = option.modifiers
default['bone groups'] = option.boneGroups
default['bone constraints'] = option.boneConstraints
default['vertex groups'] = option.vertexGroups
default['shapekeys'] = option.shapekeys
default['uvs'] = option.uvs
default['vertex colors'] = option.vertexColors
default['materials'] = option.materials
default['textures'] = option.textures
default['particle systems'] = option.particleSystems
default['bone mode'] = option.boneMode
default['display bones'] = option.displayBones
# properties
def properties(context):
# option
option = context.window_manager.PropertyPanel
# default
default = defaults['properties panel']
default['location'] = option.location
# shared
def shared(context):
# option
option = context.window_manager.BatchShared
# default
default = defaults['shared']
# options
default['large popups'] = option.largePopups
default['sort'] = option.sort
default['type'] = option.type
default['axis'] = option.axis
default['invert'] = option.invert
default['count'] = option.count
default['pad'] = option.pad
default['start'] = option.start
default['step'] = option.step
default['separator'] = option.separator
default['link'] = option.link
default['ignore'] = option.ignore
# auto
def auto(context):
# option
option = context.window_manager.AutoName
# default
default = defaults['auto name']
# options
default['mode'] = option.mode
default['objects'] = option.objects
default['constraints'] = option.constraints
default['modifiers'] = option.modifiers
default['object data'] = option.objectData
default['bone constraints'] = option.boneConstraints
default['object type'] = option.objectType
default['constraint type'] = option.constraintType
default['modifier type'] = option.modifierType
# option
option = context.scene.ObjectNames
# default
default = defaults['auto name']['object names']
# options
default['prefix'] = option.prefix
default['mesh'] = option.mesh
default['curve'] = option.curve
default['surface'] = option.surface
default['meta'] = option.meta
default['font'] = option.font
default['armature'] = option.armature
default['lattice'] = option.lattice
default['empty'] = option.empty
default['speaker'] = option.speaker
default['camera'] = option.camera
default['lamp'] = option.lamp
# option
option = context.scene.ConstraintNames
# default
default = defaults['auto name']['constraint names']
# options
default['prefix'] = option.prefix
default['camera solver'] = option.cameraSolver
default['follow track'] = option.followTrack
default['object solver'] = option.objectSolver
default['copy location'] = option.copyLocation
default['copy rotation'] = option.copyRotation
default['copy scale'] = option.copyScale
default['copy transforms'] = option.copyTransforms
default['limit distance'] = option.limitDistance
default['limit location'] = option.limitLocation
default['limit rotation'] = option.limitRotation
default['limit scale'] = option.limitScale
default['maintain volume'] = option.maintainVolume
default['transform'] = option.transform
default['clamp to'] = option.clampTo
default['damped track'] = option.dampedTrack
default['inverse kinematics'] = option.inverseKinematics
default['locked track'] = option.lockedTrack
default['spline inverse kinematics'] = option.splineInverseKinematics
default['stretch to'] = option.stretchTo
default['track to'] = option.trackTo
default['action'] = option.action
default['child of'] = option.childOf
default['floor'] = option.floor
default['follow path'] = option.followPath
default['pivot'] = option.pivot
default['rigid body joint'] = option.rigidBodyJoint
default['shrinkwrap'] = option.shrinkwrap
# option
option = context.scene.ModifierNames
# default
default = defaults['auto name']['modifier names']
# options
default['prefix'] = option.prefix
default['data transfer'] = option.dataTransfer
default['mesh cache'] = option.meshCache
default['normal edit'] = option.normalEdit
default['uv project'] = option.uvProject
default['uv warp'] = option.uvWarp
default['vertex weight edit'] = option.vertexWeightEdit
default['vertex weight mix'] = option.vertexWeightMix
default['vertex weight proximity'] = option.vertexWeightProximity
default['array'] = option.array
default['bevel'] = option.bevel
default['boolean'] = option.boolean
default['build'] = option.build
default['decimate'] = option.decimate
default['edge split'] = option.edgeSplit
default['mask'] = option.mask
default['mirror'] = option.mirror
default['multiresolution'] = option.multiresolution
default['remesh'] = option.remesh
default['screw'] = option.screw
default['skin'] = option.skin
default['solidify'] = option.solidify
default['subdivision surface'] = option.subdivisionSurface
default['triangulate'] = option.triangulate
default['wireframe'] = option.wireframe
default['armature'] = option.armature
default['cast'] = option.cast
default['corrective smooth'] = option.correctiveSmooth
default['curve'] = option.curve
default['displace'] = option.displace
default['hook'] = option.hook
default['laplacian smooth'] = option.laplacianSmooth
default['laplacian deform'] = option.laplacianDeform
default['lattice'] = option.lattice
default['mesh deform'] = option.meshDeform
default['shrinkwrap'] = option.shrinkwrap
default['simple deform'] = option.simpleDeform
default['smooth'] = option.smooth
default['warp'] = option.warp
default['wave'] = option.wave
default['cloth'] = option.cloth
default['collision'] = option.collision
default['dynamic paint'] = option.dynamicPaint
default['explode'] = option.explode
default['fluid simulation'] = option.fluidSimulation
default['ocean'] = option.ocean
default['particle instance'] = option.particleInstance
default['particle system'] = option.particleSystem
default['smoke'] = option.smoke
default['soft body'] = option.softBody
# option
option = context.scene.ObjectDataNames
# default
default = defaults['auto name']['object data names']
# options
default['prefix'] = option.prefix
default['mesh'] = option.mesh
default['curve'] = option.curve
default['surface'] = option.surface
default['meta'] = option.meta
default['font'] = option.font
default['armature'] = option.armature
default['lattice'] = option.lattice
default['speaker'] = option.speaker
default['camera'] = option.camera
default['lamp'] = option.lamp
# batch
def batch(context):
# option
option = context.window_manager.BatchName
# default
default = defaults['batch name']
# options
default['mode'] = option.mode
default['actions'] = option.actions
default['action groups'] = option.actionGroups
default['grease pencil'] = option.greasePencil
default['pencil layers'] = option.pencilLayers
default['objects'] = option.objects
default['groups'] = option.groups
default['constraints'] = option.constraints
default['modifiers'] = option.modifiers
default['object data'] = option.objectData
default['bone groups'] = option.boneGroups
default['bones'] = option.bones
default['bone constraints'] = option.boneConstraints
default['vertex groups'] = option.vertexGroups
default['shapekeys'] = option.shapekeys
default['uvs'] = option.uvs
default['vertex colors'] = option.vertexColors
default['materials'] = option.materials
default['textures'] = option.textures
default['particle systems'] = option.particleSystems
default['particle settings'] = option.particleSettings
default['object type'] = option.objectType
default['constraint type'] = option.constraintType
default['modifier type'] = option.modifierType
default['sensors'] = option.sensors
default['controllers'] = option.controllers
default['actuators'] = option.actuators
default['line sets'] = option.lineSets
default['linestyles'] = option.linestyles
default['linestyle modifiers'] = option.linestyleModifiers
default['linestyle modifier type'] = option.linestyleModifierType
default['scenes'] = option.scenes
default['render layers'] = option.renderLayers
default['worlds'] = option.worlds
default['libraries'] = option.libraries
default['images'] = option.images
default['masks'] = option.masks
default['sequences'] = option.sequences
default['movie clips'] = option.movieClips
default['sounds'] = option.sounds
default['screens'] = option.screens
default['keying sets'] = option.keyingSets
default['palettes'] = option.palettes
default['brushes'] = option.brushes
default['nodes'] = option.nodes
default['node labels'] = option.nodeLabels
default['frame nodes'] = option.frameNodes
default['node groups'] = option.nodeGroups
default['texts'] = option.texts
default['ignore action'] = option.ignoreAction
default['ignore grease pencil'] = option.ignoreGreasePencil
default['ignore object'] = option.ignoreObject
default['ignore group'] = option.ignoreGroup
default['ignore constraint'] = option.ignoreConstraint
default['ignore modifier'] = option.ignoreModifier
default['ignore bone'] = option.ignoreBone
default['ignore bone group'] = option.ignoreBoneGroup
default['ignore bone constraint'] = option.ignoreBoneConstraint
default['ignore object data'] = option.ignoreObjectData
default['ignore vertex group'] = option.ignoreVertexGroup
default['ignore shapekey'] = option.ignoreShapekey
default['ignore uv'] = option.ignoreUV
default['ignore vertex color'] = option.ignoreVertexColor
default['ignore material'] = option.ignoreMaterial
default['ignore texture'] = option.ignoreTexture
default['ignore particle system'] = option.ignoreParticleSystem
default['ignore particle setting'] = option.ignoreParticleSetting
default['custom'] = option.custom
default['find'] = option.find
default['regex'] = option.regex
default['replace'] = option.replace
default['prefix'] = option.prefix
default['suffix'] = option.suffix
default['suffix last'] = option.suffixLast
default['trim start'] = option.trimStart
default['trim end'] = option.trimEnd
default['cut start'] = option.cutStart
default['cut amount'] = option.cutAmount
# copy
def copy(context):
# option
option = context.window_manager.CopyName
# default
default = defaults['copy name']
# options
default['mode'] = option.mode
default['source'] = option.source
default['objects'] = option.objects
default['object data'] = option.objectData
default['materials'] = option.materials
default['textures'] = option.textures
default['particle systems'] = option.particleSystems
default['particle settings'] = option.particleSettings
default['use active object'] = option.useActiveObject
|
gpl-3.0
|
hjanime/VisTrails
|
contrib/TetGenBridge/__init__.py
|
6
|
3057
|
############################################################################
##
## Copyright (C) 2006-2007 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at [email protected].
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
"""The TetGenBridge package wraps the TetGen library, exposing the tetrahedralize function as well as the input/output classes."""
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
import TetGen
import CreateTestSurf
identifier = 'edu.utah.sci.vistrails.tetget'
version = '0.9.0'
name = 'tetgen'
# The port that wraps a tetgenio class.
class tetgenio_wrapper(Module):
def __init__(self) :
self.data = TetGen.tetgenio()
class tetrahedralize(Module):
"""tetrahedralize corresponds to the TetGen function of the same name"""
def __init__(self):
Module.__init__(self)
def compute(self):
switches = None
if self.has_input("switches") :
switches = self.get_input("switches")
print switches
if self.has_input("tgio in") :
tgio_in = self.get_input("tgio in").data
print "input has %d nodes!" % tgio_in.numberofpoints
tgio_in.save_nodes("/tmp/tgIN.vt")
tgio_in.save_poly("/tmp/tgIN.vt")
out = tetgenio_wrapper()
TetGen.tetrahedralize(switches, tgio_in, out.data)
print "Done making tets"
self.set_output("tgio out", out)
def initialize(*args, **keywords):
reg = core.modules.module_registry
    # Register the module that calls the TetGen library function of the same name.
reg.add_module(tetrahedralize)
# command line switches that tell tetrahedralize what to do.
reg.add_input_port(tetrahedralize, "switches",
(core.modules.basic_modules.String, 'tetgen options'))
# input mesh information.
reg.add_input_port(tetrahedralize, "tgio in",
(tetgenio_wrapper, 'input data'))
# output mesh information.
reg.add_output_port(tetrahedralize, "tgio out",
(tetgenio_wrapper, 'output data'))
#holds the tetgenio class, and acts as a port.
reg.add_module(tetgenio_wrapper)
# get support and testing modules registered.
CreateTestSurf.initialize(reg)
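# Illustrative wiring (assumed pipeline, not part of the original package): in a
# VisTrails workflow a String module carrying TetGen switches (e.g. "pq1.414a0.1")
# connects to the "switches" port, a tetgenio_wrapper carrying the input mesh
# connects to "tgio in", and the tetrahedralized mesh is read from "tgio out".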
|
bsd-3-clause
|
oldmantaiter/disco
|
tests/test_pipe.py
|
11
|
1829
|
from disco.test import TestCase, TestPipe
from disco.compat import bytes_to_str
from disco.worker.pipeline.worker import Stage
def reduce_init(interface, params):
return []
def reduce_done(interface, state):
out = interface.output(0)
for k, v in sorted(state):
out.add(k, v)
def map(interface, state, label, inp):
out = interface.output(0)
for e in inp:
out.add(int(e), (bytes_to_str(e)).strip())
def intermediate(interface, state, label, inp):
out = interface.output(0)
for k, v in sorted(inp):
out.add(k, v)
def reduce(interface, state, label, inp):
for k, v in sorted(inp):
state.append((k, v))
def getPipeline(count, type):
intermediates = [(type, Stage("inter_%d" % i, process=intermediate)) for i in range(count)]
pipeline = [("split", Stage("map", process=map))] + intermediates + [("group_label", Stage("reduce",
init=reduce_init,
process=reduce, done=reduce_done))]
return pipeline
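# For example (illustrative), getPipeline(1, "group_label") returns a pipeline
# equivalent to:
#   [("split", Stage("map", ...)),
#    ("group_label", Stage("inter_0", ...)),
#    ("group_label", Stage("reduce", ...))]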
class PipePerNodeJob(TestPipe):
pipeline = getPipeline(10, "group_node_label")
class PipeGlobalJob(TestPipe):
pipeline = getPipeline(10, "group_label")
class SimpleTestCase(TestCase):
input = range(1000)
def answers(self):
return ((i, str(i)) for i in self.input for x in range(10000))
def serve(self, path):
return '\n'.join([path] * 10000)
def test_per_node(self):
self.job = PipePerNodeJob().run(input=self.test_server.urls(self.input))
self.assertResults(self.job, self.answers())
def test_global(self):
self.job = PipeGlobalJob().run(input=self.test_server.urls(self.input))
self.assertResults(self.job, self.answers())
|
bsd-3-clause
|
marckuz/django
|
tests/staticfiles_tests/test_views.py
|
279
|
1312
|
from __future__ import unicode_literals
import posixpath
from django.conf import settings
from django.test import override_settings
from .cases import StaticFilesTestCase, TestDefaults
@override_settings(ROOT_URLCONF='staticfiles_tests.urls.default')
class TestServeStatic(StaticFilesTestCase):
"""
Test static asset serving view.
"""
def _response(self, filepath):
return self.client.get(
posixpath.join(settings.STATIC_URL, filepath))
def assertFileContains(self, filepath, text):
self.assertContains(self._response(filepath), text)
def assertFileNotFound(self, filepath):
self.assertEqual(self._response(filepath).status_code, 404)
@override_settings(DEBUG=False)
class TestServeDisabled(TestServeStatic):
"""
Test serving static files disabled when DEBUG is False.
"""
def test_disabled_serving(self):
self.assertFileNotFound('test.txt')
class TestServeStaticWithDefaultURL(TestServeStatic, TestDefaults):
"""
Test static asset serving view with manually configured URLconf.
"""
@override_settings(ROOT_URLCONF='staticfiles_tests.urls.helper')
class TestServeStaticWithURLHelper(TestServeStatic, TestDefaults):
"""
Test static asset serving view with staticfiles_urlpatterns helper.
"""
|
bsd-3-clause
|
int19h/PTVS
|
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/cryptography/hazmat/primitives/asymmetric/x448.py
|
13
|
2249
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import six
from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
@six.add_metaclass(abc.ABCMeta)
class X448PublicKey(object):
@classmethod
def from_public_bytes(cls, data):
from cryptography.hazmat.backends.openssl.backend import backend
if not backend.x448_supported():
raise UnsupportedAlgorithm(
"X448 is not supported by this version of OpenSSL.",
_Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM
)
return backend.x448_load_public_bytes(data)
@abc.abstractmethod
def public_bytes(self, encoding, format):
"""
The serialized bytes of the public key.
"""
@six.add_metaclass(abc.ABCMeta)
class X448PrivateKey(object):
@classmethod
def generate(cls):
from cryptography.hazmat.backends.openssl.backend import backend
if not backend.x448_supported():
raise UnsupportedAlgorithm(
"X448 is not supported by this version of OpenSSL.",
_Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM
)
return backend.x448_generate_key()
@classmethod
def from_private_bytes(cls, data):
from cryptography.hazmat.backends.openssl.backend import backend
if not backend.x448_supported():
raise UnsupportedAlgorithm(
"X448 is not supported by this version of OpenSSL.",
_Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM
)
return backend.x448_load_private_bytes(data)
@abc.abstractmethod
def public_key(self):
"""
        Returns the public key associated with this private key.
"""
@abc.abstractmethod
def private_bytes(self, encoding, format, encryption_algorithm):
"""
The serialized bytes of the private key.
"""
@abc.abstractmethod
def exchange(self, peer_public_key):
"""
Performs a key exchange operation using the provided peer's public key.
"""
|
apache-2.0
|
bhgv/bCNC
|
Pendant.py
|
1
|
6650
|
# -*- coding: ascii -*-
# $Id$
#
# Author: [email protected]
# Date: 24-Aug-2014
__author__ = "Vasilis Vlachoudis"
__email__ = "[email protected]"
import os
import re
import sys
#import cgi
import json
import urllib
import threading
from CNC import CNC
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
import BaseHTTPServer as HTTPServer
except ImportError:
import http.server as HTTPServer
import Camera
HOSTNAME = "localhost"
port = 8080
httpd = None
prgpath = os.path.abspath(os.path.dirname(sys.argv[0]))
webpath = "%s/pendant"%(prgpath)
iconpath = "%s/icons/"%(prgpath)
#==============================================================================
# Simple Pendant controller for CNC
#==============================================================================
class Pendant(HTTPServer.BaseHTTPRequestHandler):
camera = None
#----------------------------------------------------------------------
def log_message(self, fmt, *args):
        # Log only requests to the main page; ignore all others.
if args[0].startswith("GET / "):
HTTPServer.BaseHTTPRequestHandler.log_message(self, fmt, *args)
#----------------------------------------------------------------------
def do_HEAD(self, rc=200, content="text/html"):
self.send_response(rc)
self.send_header("Content-type", content)
self.end_headers()
#----------------------------------------------------------------------
def do_GET(self):
"""Respond to a GET request."""
if "?" in self.path:
page,arg = self.path.split("?",1)
arg = dict(urlparse.parse_qsl(arg))
else:
page = self.path
arg = None
# print self.path,type(self.path)
# print page
# print arg
if page == "/send":
if arg is None: return
for key,value in arg.items():
if key=="gcode":
for line in value.split('\n'):
httpd.app.queue.put(line+"\n")
elif key=="cmd":
httpd.app.pendant.put(urllib.unquote(value))
#send empty response so browser does not generate errors
self.do_HEAD(200, "text/text")
self.wfile.write("")
elif page == "/state":
self.do_HEAD(200, content="text/text")
tmp = {}
for name in ["state", "color", "msg", "wx", "wy", "wz", "G"]:
tmp[name] = CNC.vars[name]
self.wfile.write(json.dumps(tmp))
elif page == "/config":
self.do_HEAD(200, content="text/text")
snd = {}
snd["rpmmax"] = httpd.app.get("CNC","spindlemax")
self.wfile.write(json.dumps(snd))
elif page == "/icon":
if arg is None: return
self.do_HEAD(200, content="image/gif")
filename = os.path.join(iconpath, arg["name"]+".gif")
try:
f = open(filename,"rb")
self.wfile.write(f.read())
f.close()
except:
pass
elif page == "/camera":
if not Camera.hasOpenCV(): return
if Pendant.camera is None:
Pendant.camera = Camera.Camera("webcam")
Pendant.camera.start()
s,img = Pendant.camera.read()
if s:
Pendant.camera.save("camera.jpg")
#cv.imwrite("camera.jpg",img)
self.do_HEAD(200, content="image/jpeg")
try:
f = open("camera.jpg","rb")
self.wfile.write(f.read())
f.close()
except:
pass
else:
self.mainPage(page[1:])
#----------------------------------------------------------------------
def deal_post_data(self):
boundary = self.headers.plisttext.split("=")[1]
remainbytes = int(self.headers['content-length'])
line = self.rfile.readline()
remainbytes -= len(line)
if not boundary in line:
return (False, "Content NOT begin with boundary")
line = self.rfile.readline()
remainbytes -= len(line)
fn = re.findall(r'Content-Disposition.*name="file"; filename="(.*)"', line)
if not fn:
return (False, "Can't find out file name...")
path = os.path.expanduser("~")
path = os.path.join(path, "bCNCUploads")
if not os.path.exists(path):
os.makedirs(path)
fn = os.path.join(path, fn[0])
line = self.rfile.readline()
remainbytes -= len(line)
line = self.rfile.readline()
remainbytes -= len(line)
try:
out = open(fn, 'wb')
except IOError:
return (False, "Can't create file to write, do you have permission to write?")
preline = self.rfile.readline()
remainbytes -= len(preline)
while remainbytes > 0:
line = self.rfile.readline()
remainbytes -= len(line)
if boundary in line:
preline = preline[0:-1]
if preline.endswith('\r'):
preline = preline[0:-1]
out.write(preline)
out.close()
return (True, "%s" % fn)
else:
out.write(preline)
preline = line
return (False, "Unexpected Ends of data.")
#----------------------------------------------------------------------
def do_POST(self):
result,fMsg=self.deal_post_data()
if(result):
httpd.app._pendantFileUploaded=fMsg
#send empty response so browser does not generate errors
self.do_HEAD(200, "text/text")
# ---------------------------------------------------------------------
def mainPage(self, page):
global webpath
#handle certain filetypes
filetype = page.rpartition('.')[2]
if filetype == "css": self.do_HEAD(content="text/css")
elif filetype == "js": self.do_HEAD(content="application/x-javascript")
elif filetype == "json": self.do_HEAD(content="application/json")
elif filetype == "jpg" or filetype == "jpeg" : self.do_HEAD(content="image/jpeg")
elif filetype == "gif": self.do_HEAD(content="image/gif")
elif filetype == "png": self.do_HEAD(content="image/png")
elif filetype == "ico": self.do_HEAD(content="image/x-icon")
else: self.do_HEAD()
if page == "": page = "index.html"
try:
f = open(os.path.join(webpath,page),"r")
self.wfile.write(f.read())
f.close()
except IOError:
self.wfile.write("""<!DOCTYPE html>
<html>
<head>
<title>Error</title>
<meta name="viewport" content="width=device-width,initial-scale=1, user-scalable=yes" />
</head>
<body>
Page not found.
</body>
</html>
""")
# -----------------------------------------------------------------------------
def _server(app):
global httpd
server_class = HTTPServer.HTTPServer
try:
httpd = server_class(('', port), Pendant)
httpd.app = app
httpd.serve_forever()
except:
httpd = None
# -----------------------------------------------------------------------------
def start(app):
global httpd
if httpd is not None: return False
thread = threading.Thread(target=_server, args=(app,))
thread.start()
return True
# -----------------------------------------------------------------------------
def stop():
global httpd
if httpd is None: return False
httpd.shutdown()
httpd = None
if Pendant.camera: Pendant.camera.stop()
return True
if __name__ == '__main__':
start()
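#------------------------------------------------------------------------------
# Illustrative sketch (editor addition, not part of the original module):
# one way a client could poll the /state endpoint served above. The host,
# port and the use of the standard urllib/urllib2 modules are assumptions
# for demonstration only.
#------------------------------------------------------------------------------
def _examplePollState(host="localhost", port=8080):
    try:
        from urllib2 import urlopen            # Python 2
    except ImportError:
        from urllib.request import urlopen     # Python 3
    # /state replies with a small JSON document holding status and position
    data = json.loads(urlopen("http://%s:%d/state"%(host, port)).read())
    return data.get("state"), data.get("wx"), data.get("wy"), data.get("wz")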
|
gpl-2.0
|
lucasmoura/personfinder
|
tools/babel/core.py
|
54
|
26152
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Core locale representation and locale data access."""
import os
import pickle
from babel import localedata
__all__ = ['UnknownLocaleError', 'Locale', 'default_locale', 'negotiate_locale',
'parse_locale']
__docformat__ = 'restructuredtext en'
_global_data = None
def get_global(key):
"""Return the dictionary for the given key in the global data.
The global data is stored in the ``babel/global.dat`` file and contains
information independent of individual locales.
>>> get_global('zone_aliases')['UTC']
'Etc/GMT'
>>> get_global('zone_territories')['Europe/Berlin']
'DE'
:param key: the data key
:return: the dictionary found in the global data under the given key
:rtype: `dict`
:since: version 0.9
"""
global _global_data
if _global_data is None:
dirname = os.path.join(os.path.dirname(__file__))
filename = os.path.join(dirname, 'global.dat')
fileobj = open(filename, 'rb')
try:
_global_data = pickle.load(fileobj)
finally:
fileobj.close()
return _global_data.get(key, {})
LOCALE_ALIASES = {
'ar': 'ar_SY', 'bg': 'bg_BG', 'bs': 'bs_BA', 'ca': 'ca_ES', 'cs': 'cs_CZ',
'da': 'da_DK', 'de': 'de_DE', 'el': 'el_GR', 'en': 'en_US', 'es': 'es_ES',
'et': 'et_EE', 'fa': 'fa_IR', 'fi': 'fi_FI', 'fr': 'fr_FR', 'gl': 'gl_ES',
'he': 'he_IL', 'hu': 'hu_HU', 'id': 'id_ID', 'is': 'is_IS', 'it': 'it_IT',
'ja': 'ja_JP', 'km': 'km_KH', 'ko': 'ko_KR', 'lt': 'lt_LT', 'lv': 'lv_LV',
'mk': 'mk_MK', 'nl': 'nl_NL', 'nn': 'nn_NO', 'no': 'nb_NO', 'pl': 'pl_PL',
'pt': 'pt_PT', 'ro': 'ro_RO', 'ru': 'ru_RU', 'sk': 'sk_SK', 'sl': 'sl_SI',
'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA'
}
class UnknownLocaleError(Exception):
"""Exception thrown when a locale is requested for which no locale data
is available.
"""
def __init__(self, identifier):
"""Create the exception.
:param identifier: the identifier string of the unsupported locale
"""
Exception.__init__(self, 'unknown locale %r' % identifier)
self.identifier = identifier
class Locale(object):
"""Representation of a specific locale.
>>> locale = Locale('en', 'US')
>>> repr(locale)
'<Locale "en_US">'
>>> locale.display_name
u'English (United States)'
A `Locale` object can also be instantiated from a raw locale string:
>>> locale = Locale.parse('en-US', sep='-')
>>> repr(locale)
'<Locale "en_US">'
`Locale` objects provide access to a collection of locale data, such as
territory and language names, number and date format patterns, and more:
>>> locale.number_symbols['decimal']
u'.'
If a locale is requested for which no locale data is available, an
`UnknownLocaleError` is raised:
>>> Locale.parse('en_DE')
Traceback (most recent call last):
...
UnknownLocaleError: unknown locale 'en_DE'
:see: `IETF RFC 3066 <http://www.ietf.org/rfc/rfc3066.txt>`_
"""
def __init__(self, language, territory=None, script=None, variant=None):
"""Initialize the locale object from the given identifier components.
>>> locale = Locale('en', 'US')
>>> locale.language
'en'
>>> locale.territory
'US'
:param language: the language code
:param territory: the territory (country or region) code
:param script: the script code
:param variant: the variant code
:raise `UnknownLocaleError`: if no locale data is available for the
requested locale
"""
self.language = language
self.territory = territory
self.script = script
self.variant = variant
self.__data = None
identifier = str(self)
if not localedata.exists(identifier):
raise UnknownLocaleError(identifier)
def default(cls, category=None, aliases=LOCALE_ALIASES):
"""Return the system default locale for the specified category.
>>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE']:
... os.environ[name] = ''
>>> os.environ['LANG'] = 'fr_FR.UTF-8'
>>> Locale.default('LC_MESSAGES')
<Locale "fr_FR">
:param category: one of the ``LC_XXX`` environment variable names
:param aliases: a dictionary of aliases for locale identifiers
:return: the value of the variable, or any of the fallbacks
(``LANGUAGE``, ``LC_ALL``, ``LC_CTYPE``, and ``LANG``)
:rtype: `Locale`
:see: `default_locale`
"""
return cls(default_locale(category, aliases=aliases))
default = classmethod(default)
def negotiate(cls, preferred, available, sep='_', aliases=LOCALE_ALIASES):
"""Find the best match between available and requested locale strings.
>>> Locale.negotiate(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
<Locale "de_DE">
>>> Locale.negotiate(['de_DE', 'en_US'], ['en', 'de'])
<Locale "de">
>>> Locale.negotiate(['de_DE', 'de'], ['en_US'])
You can specify the character used in the locale identifiers to separate
the different components. This separator is applied to both lists. Also,
case is ignored in the comparison:
>>> Locale.negotiate(['de-DE', 'de'], ['en-us', 'de-de'], sep='-')
<Locale "de_DE">
:param preferred: the list of locale identifers preferred by the user
:param available: the list of locale identifiers available
:param aliases: a dictionary of aliases for locale identifiers
:return: the `Locale` object for the best match, or `None` if no match
was found
:rtype: `Locale`
:see: `negotiate_locale`
"""
identifier = negotiate_locale(preferred, available, sep=sep,
aliases=aliases)
if identifier:
return Locale.parse(identifier, sep=sep)
negotiate = classmethod(negotiate)
def parse(cls, identifier, sep='_'):
"""Create a `Locale` instance for the given locale identifier.
>>> l = Locale.parse('de-DE', sep='-')
>>> l.display_name
u'Deutsch (Deutschland)'
If the `identifier` parameter is not a string, but actually a `Locale`
object, that object is returned:
>>> Locale.parse(l)
<Locale "de_DE">
:param identifier: the locale identifier string
:param sep: optional component separator
:return: a corresponding `Locale` instance
:rtype: `Locale`
:raise `ValueError`: if the string does not appear to be a valid locale
identifier
:raise `UnknownLocaleError`: if no locale data is available for the
requested locale
:see: `parse_locale`
"""
if isinstance(identifier, basestring):
return cls(*parse_locale(identifier, sep=sep))
return identifier
parse = classmethod(parse)
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '<Locale "%s">' % str(self)
def __str__(self):
return '_'.join(filter(None, [self.language, self.script,
self.territory, self.variant]))
def _data(self):
if self.__data is None:
self.__data = localedata.LocaleDataDict(localedata.load(str(self)))
return self.__data
_data = property(_data)
def get_display_name(self, locale=None):
"""Return the display name of the locale using the given locale.
The display name will include the language, territory, script, and
variant, if those are specified.
>>> Locale('zh', 'CN', script='Hans').get_display_name('en')
u'Chinese (Simplified Han, China)'
:param locale: the locale to use
:return: the display name
"""
if locale is None:
locale = self
locale = Locale.parse(locale)
retval = locale.languages.get(self.language)
if self.territory or self.script or self.variant:
details = []
if self.script:
details.append(locale.scripts.get(self.script))
if self.territory:
details.append(locale.territories.get(self.territory))
if self.variant:
details.append(locale.variants.get(self.variant))
details = filter(None, details)
if details:
retval += ' (%s)' % u', '.join(details)
return retval
display_name = property(get_display_name, doc="""\
The localized display name of the locale.
>>> Locale('en').display_name
u'English'
>>> Locale('en', 'US').display_name
u'English (United States)'
>>> Locale('sv').display_name
u'svenska'
:type: `unicode`
""")
def english_name(self):
return self.get_display_name(Locale('en'))
english_name = property(english_name, doc="""\
The english display name of the locale.
>>> Locale('de').english_name
u'German'
>>> Locale('de', 'DE').english_name
u'German (Germany)'
:type: `unicode`
""")
#{ General Locale Display Names
def languages(self):
return self._data['languages']
languages = property(languages, doc="""\
Mapping of language codes to translated language names.
>>> Locale('de', 'DE').languages['ja']
u'Japanisch'
:type: `dict`
:see: `ISO 639 <http://www.loc.gov/standards/iso639-2/>`_
""")
def scripts(self):
return self._data['scripts']
scripts = property(scripts, doc="""\
Mapping of script codes to translated script names.
>>> Locale('en', 'US').scripts['Hira']
u'Hiragana'
:type: `dict`
:see: `ISO 15924 <http://www.evertype.com/standards/iso15924/>`_
""")
def territories(self):
return self._data['territories']
territories = property(territories, doc="""\
Mapping of territory codes to translated territory names.
>>> Locale('es', 'CO').territories['DE']
u'Alemania'
:type: `dict`
:see: `ISO 3166 <http://www.iso.org/iso/en/prods-services/iso3166ma/>`_
""")
def variants(self):
return self._data['variants']
variants = property(variants, doc="""\
Mapping of variant codes to translated variant names.
>>> Locale('de', 'DE').variants['1901']
u'Alte deutsche Rechtschreibung'
:type: `dict`
""")
#{ Number Formatting
def currencies(self):
return self._data['currency_names']
currencies = property(currencies, doc="""\
Mapping of currency codes to translated currency names.
>>> Locale('en').currencies['COP']
u'Colombian Peso'
>>> Locale('de', 'DE').currencies['COP']
u'Kolumbianischer Peso'
:type: `dict`
""")
def currency_symbols(self):
return self._data['currency_symbols']
currency_symbols = property(currency_symbols, doc="""\
Mapping of currency codes to symbols.
>>> Locale('en', 'US').currency_symbols['USD']
u'$'
>>> Locale('es', 'CO').currency_symbols['USD']
u'US$'
:type: `dict`
""")
def number_symbols(self):
return self._data['number_symbols']
number_symbols = property(number_symbols, doc="""\
Symbols used in number formatting.
>>> Locale('fr', 'FR').number_symbols['decimal']
u','
:type: `dict`
""")
def decimal_formats(self):
return self._data['decimal_formats']
decimal_formats = property(decimal_formats, doc="""\
Locale patterns for decimal number formatting.
>>> Locale('en', 'US').decimal_formats[None]
<NumberPattern u'#,##0.###'>
:type: `dict`
""")
def currency_formats(self):
return self._data['currency_formats']
currency_formats = property(currency_formats, doc=r"""\
Locale patterns for currency number formatting.
>>> print Locale('en', 'US').currency_formats[None]
<NumberPattern u'\xa4#,##0.00'>
:type: `dict`
""")
def percent_formats(self):
return self._data['percent_formats']
percent_formats = property(percent_formats, doc="""\
Locale patterns for percent number formatting.
>>> Locale('en', 'US').percent_formats[None]
<NumberPattern u'#,##0%'>
:type: `dict`
""")
def scientific_formats(self):
return self._data['scientific_formats']
scientific_formats = property(scientific_formats, doc="""\
Locale patterns for scientific number formatting.
>>> Locale('en', 'US').scientific_formats[None]
<NumberPattern u'#E0'>
:type: `dict`
""")
#{ Calendar Information and Date Formatting
def periods(self):
return self._data['periods']
periods = property(periods, doc="""\
Locale display names for day periods (AM/PM).
>>> Locale('en', 'US').periods['am']
u'AM'
:type: `dict`
""")
def days(self):
return self._data['days']
days = property(days, doc="""\
Locale display names for weekdays.
>>> Locale('de', 'DE').days['format']['wide'][3]
u'Donnerstag'
:type: `dict`
""")
def months(self):
return self._data['months']
months = property(months, doc="""\
Locale display names for months.
>>> Locale('de', 'DE').months['format']['wide'][10]
u'Oktober'
:type: `dict`
""")
def quarters(self):
return self._data['quarters']
quarters = property(quarters, doc="""\
Locale display names for quarters.
>>> Locale('de', 'DE').quarters['format']['wide'][1]
u'1. Quartal'
:type: `dict`
""")
def eras(self):
return self._data['eras']
eras = property(eras, doc="""\
Locale display names for eras.
>>> Locale('en', 'US').eras['wide'][1]
u'Anno Domini'
>>> Locale('en', 'US').eras['abbreviated'][0]
u'BC'
:type: `dict`
""")
def time_zones(self):
return self._data['time_zones']
time_zones = property(time_zones, doc="""\
Locale display names for time zones.
>>> Locale('en', 'US').time_zones['Europe/London']['long']['daylight']
u'British Summer Time'
>>> Locale('en', 'US').time_zones['America/St_Johns']['city']
u"St. John's"
:type: `dict`
""")
def meta_zones(self):
return self._data['meta_zones']
meta_zones = property(meta_zones, doc="""\
Locale display names for meta time zones.
Meta time zones are basically groups of different Olson time zones that
have the same GMT offset and daylight savings time.
>>> Locale('en', 'US').meta_zones['Europe_Central']['long']['daylight']
u'Central European Summer Time'
:type: `dict`
:since: version 0.9
""")
def zone_formats(self):
return self._data['zone_formats']
zone_formats = property(zone_formats, doc=r"""\
Patterns related to the formatting of time zones.
>>> Locale('en', 'US').zone_formats['fallback']
u'%(1)s (%(0)s)'
>>> Locale('pt', 'BR').zone_formats['region']
u'Hor\xe1rio %s'
:type: `dict`
:since: version 0.9
""")
def first_week_day(self):
return self._data['week_data']['first_day']
first_week_day = property(first_week_day, doc="""\
The first day of a week, with 0 being Monday.
>>> Locale('de', 'DE').first_week_day
0
>>> Locale('en', 'US').first_week_day
6
:type: `int`
""")
def weekend_start(self):
return self._data['week_data']['weekend_start']
weekend_start = property(weekend_start, doc="""\
The day the weekend starts, with 0 being Monday.
>>> Locale('de', 'DE').weekend_start
5
:type: `int`
""")
def weekend_end(self):
return self._data['week_data']['weekend_end']
weekend_end = property(weekend_end, doc="""\
The day the weekend ends, with 0 being Monday.
>>> Locale('de', 'DE').weekend_end
6
:type: `int`
""")
def min_week_days(self):
return self._data['week_data']['min_days']
min_week_days = property(min_week_days, doc="""\
The minimum number of days in a week so that the week is counted as the
first week of a year or month.
>>> Locale('de', 'DE').min_week_days
4
:type: `int`
""")
def date_formats(self):
return self._data['date_formats']
date_formats = property(date_formats, doc="""\
Locale patterns for date formatting.
>>> Locale('en', 'US').date_formats['short']
<DateTimePattern u'M/d/yy'>
>>> Locale('fr', 'FR').date_formats['long']
<DateTimePattern u'd MMMM yyyy'>
:type: `dict`
""")
def time_formats(self):
return self._data['time_formats']
time_formats = property(time_formats, doc="""\
Locale patterns for time formatting.
>>> Locale('en', 'US').time_formats['short']
<DateTimePattern u'h:mm a'>
>>> Locale('fr', 'FR').time_formats['long']
<DateTimePattern u'HH:mm:ss z'>
:type: `dict`
""")
def datetime_formats(self):
return self._data['datetime_formats']
datetime_formats = property(datetime_formats, doc="""\
Locale patterns for datetime formatting.
>>> Locale('en').datetime_formats[None]
u'{1} {0}'
>>> Locale('th').datetime_formats[None]
u'{1}, {0}'
:type: `dict`
""")
def default_locale(category=None, aliases=LOCALE_ALIASES):
"""Returns the system default locale for a given category, based on
environment variables.
>>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE']:
... os.environ[name] = ''
>>> os.environ['LANG'] = 'fr_FR.UTF-8'
>>> default_locale('LC_MESSAGES')
'fr_FR'
The "C" or "POSIX" pseudo-locales are treated as aliases for the
"en_US_POSIX" locale:
>>> os.environ['LC_MESSAGES'] = 'POSIX'
>>> default_locale('LC_MESSAGES')
'en_US_POSIX'
:param category: one of the ``LC_XXX`` environment variable names
:param aliases: a dictionary of aliases for locale identifiers
:return: the value of the variable, or any of the fallbacks (``LANGUAGE``,
``LC_ALL``, ``LC_CTYPE``, and ``LANG``)
:rtype: `str`
"""
varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')
for name in filter(None, varnames):
locale = os.getenv(name)
if locale:
if name == 'LANGUAGE' and ':' in locale:
# the LANGUAGE variable may contain a colon-separated list of
# language codes; we just pick the language on the list
locale = locale.split(':')[0]
if locale in ('C', 'POSIX'):
locale = 'en_US_POSIX'
elif aliases and locale in aliases:
locale = aliases[locale]
try:
return '_'.join(filter(None, parse_locale(locale)))
except ValueError:
pass
def negotiate_locale(preferred, available, sep='_', aliases=LOCALE_ALIASES):
"""Find the best match between available and requested locale strings.
>>> negotiate_locale(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
'de_DE'
>>> negotiate_locale(['de_DE', 'en_US'], ['en', 'de'])
'de'
Case is ignored by the algorithm; the result uses the case of the preferred
locale identifier:
>>> negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at'])
'de_DE'
By default, some web browsers unfortunately do not include the territory
in the locale identifier for many locales, and some don't even allow the
user to easily add the territory. So while you may prefer using qualified
locale identifiers in your web application, they would not normally match
the language-only locale sent by such browsers. To work around that, this
function uses a default mapping of commonly used language-only locale
identifiers to identifiers including the territory:
>>> negotiate_locale(['ja', 'en_US'], ['ja_JP', 'en_US'])
'ja_JP'
Some browsers even use an incorrect or outdated language code, such as "no"
for Norwegian, where the correct locale identifier would actually be "nb_NO"
(Bokmål) or "nn_NO" (Nynorsk). The aliases are intended to take care of
such cases, too:
>>> negotiate_locale(['no', 'sv'], ['nb_NO', 'sv_SE'])
'nb_NO'
You can override this default mapping by passing a different `aliases`
dictionary to this function, or you can bypass the behavior altogether by
setting the `aliases` parameter to `None`.
:param preferred: the list of locale strings preferred by the user
:param available: the list of locale strings available
:param sep: character that separates the different parts of the locale
strings
:param aliases: a dictionary of aliases for locale identifiers
:return: the locale identifier for the best match, or `None` if no match
was found
:rtype: `str`
"""
available = [a.lower() for a in available if a]
for locale in preferred:
ll = locale.lower()
if ll in available:
return locale
if aliases:
alias = aliases.get(ll)
if alias:
alias = alias.replace('_', sep)
if alias.lower() in available:
return alias
parts = locale.split(sep)
if len(parts) > 1 and parts[0].lower() in available:
return parts[0]
return None
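# Illustrative sketch (editor addition): negotiating against the preference
# list taken from an HTTP ``Accept-Language`` header. The sample header, the
# available list and the naive quality-value stripping below are assumptions
# for demonstration only.
def _example_negotiate_from_header(header='de-DE,de;q=0.8,en-US;q=0.5',
                                   available=('de-DE', 'en-US')):
    # keep the client's preference order, drop the ;q=... weights
    preferred = [part.split(';')[0].strip() for part in header.split(',')]
    return negotiate_locale(preferred, list(available), sep='-')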
def parse_locale(identifier, sep='_'):
"""Parse a locale identifier into a tuple of the form::
``(language, territory, script, variant)``
>>> parse_locale('zh_CN')
('zh', 'CN', None, None)
>>> parse_locale('zh_Hans_CN')
('zh', 'CN', 'Hans', None)
The default component separator is "_", but a different separator can be
specified using the `sep` parameter:
>>> parse_locale('zh-CN', sep='-')
('zh', 'CN', None, None)
If the identifier cannot be parsed into a locale, a `ValueError` exception
is raised:
>>> parse_locale('not_a_LOCALE_String')
Traceback (most recent call last):
...
ValueError: 'not_a_LOCALE_String' is not a valid locale identifier
Encoding information and locale modifiers are removed from the identifier:
>>> parse_locale('it_IT@euro')
('it', 'IT', None, None)
>>> parse_locale('en_US.UTF-8')
('en', 'US', None, None)
>>> parse_locale('de_DE.iso885915@euro')
('de', 'DE', None, None)
:param identifier: the locale identifier string
:param sep: character that separates the different components of the locale
identifier
:return: the ``(language, territory, script, variant)`` tuple
:rtype: `tuple`
:raise `ValueError`: if the string does not appear to be a valid locale
identifier
:see: `IETF RFC 4646 <http://www.ietf.org/rfc/rfc4646.txt>`_
"""
if '.' in identifier:
# this is probably the charset/encoding, which we don't care about
identifier = identifier.split('.', 1)[0]
if '@' in identifier:
# this is a locale modifier such as @euro, which we don't care about
# either
identifier = identifier.split('@', 1)[0]
parts = identifier.split(sep)
lang = parts.pop(0).lower()
if not lang.isalpha():
raise ValueError('expected only letters, got %r' % lang)
script = territory = variant = None
if parts:
if len(parts[0]) == 4 and parts[0].isalpha():
script = parts.pop(0).title()
if parts:
if len(parts[0]) == 2 and parts[0].isalpha():
territory = parts.pop(0).upper()
elif len(parts[0]) == 3 and parts[0].isdigit():
territory = parts.pop(0)
if parts:
if len(parts[0]) == 4 and parts[0][0].isdigit() or \
len(parts[0]) >= 5 and parts[0][0].isalpha():
variant = parts.pop()
if parts:
raise ValueError('%r is not a valid locale identifier' % identifier)
return lang, territory, script, variant
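# Illustrative sketch (editor addition): combining the helpers above -- parse a
# raw identifier, build a ``Locale`` and ask it for a localized display name.
# The identifier used here is an assumption for demonstration only.
def _example_round_trip(identifier='de_DE.UTF-8'):
    parts = parse_locale(identifier)                # ('de', 'DE', None, None)
    locale = Locale(*parts)
    return locale.get_display_name(Locale('en'))    # u'German (Germany)'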
|
apache-2.0
|
quarckster/cfme_tests
|
cfme/tests/infrastructure/test_vm_rest.py
|
2
|
2531
|
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.rest.gen_data import a_provider as _a_provider
from cfme.rest.gen_data import vm as _vm
from cfme.utils import error
from cfme.utils.rest import assert_response, delete_resources_from_collection
from cfme.utils.wait import wait_for
pytestmark = [test_requirements.provision]
@pytest.fixture(scope="function")
def a_provider(request):
return _a_provider(request)
@pytest.fixture(scope="function")
def vm_name(request, a_provider, appliance):
return _vm(request, a_provider, appliance.rest_api)
@pytest.mark.parametrize(
'from_detail', [True, False],
ids=['from_detail', 'from_collection'])
def test_edit_vm(request, vm_name, appliance, from_detail):
"""Tests edit VMs using REST API.
Testing BZ 1428250.
Metadata:
test_flag: rest
"""
vm = appliance.rest_api.collections.vms.get(name=vm_name)
request.addfinalizer(vm.action.delete)
new_description = 'Test REST VM {}'.format(fauxfactory.gen_alphanumeric(5))
payload = {'description': new_description}
if from_detail:
edited = vm.action.edit(**payload)
assert_response(appliance)
else:
payload.update(vm._ref_repr())
edited = appliance.rest_api.collections.vms.action.edit(payload)
assert_response(appliance)
edited = edited[0]
record, __ = wait_for(
lambda: appliance.rest_api.collections.vms.find_by(
description=new_description) or False,
num_sec=100,
delay=5,
)
vm.reload()
assert vm.description == edited.description == record[0].description
@pytest.mark.tier(3)
@pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE'])
def test_delete_vm_from_detail(vm_name, appliance, method):
vm = appliance.rest_api.collections.vms.get(name=vm_name)
del_action = getattr(vm.action.delete, method.upper())
del_action()
assert_response(appliance)
wait_for(
lambda: not appliance.rest_api.collections.vms.find_by(name=vm_name), num_sec=300, delay=10)
with error.expected('ActiveRecord::RecordNotFound'):
del_action()
assert_response(appliance, http_status=404)
@pytest.mark.tier(3)
def test_delete_vm_from_collection(vm_name, appliance):
vm = appliance.rest_api.collections.vms.get(name=vm_name)
collection = appliance.rest_api.collections.vms
delete_resources_from_collection(collection, [vm], not_found=True, num_sec=300, delay=10)
|
gpl-2.0
|
openpolis/politikos
|
project/politikos/views.py
|
1
|
2982
|
from django.conf import settings
from django.http import HttpResponseRedirect
from django.views.generic import TemplateView
from django.contrib import messages
from geopy.geocoders import Nominatim
from pci import Mapit, Popit
import tortilla
class RepresentativesSearchView(TemplateView):
template_name = 'explorer.html'
def post(self, request, *args, **kwargs):
address = self.request.POST.get('search', None)
if address:
kwargs['search'] = address
# for some reason, Nominatim responds with a 403 to these requests
# geolocator = Nominatim()
# location = geolocator.geocode(address)
nominatim = tortilla.wrap('http://nominatim.openstreetmap.org')
try:
location = nominatim.search.get(
address, params={'format': 'json'}
)[0]
except IndexError:
messages.warning(request, 'Location could not be found, try another one.')
return HttpResponseRedirect('/')
kwargs['location'] = location
kwargs['areas'] = []
if location:
endpoint = settings.MAPIT_ENDPOINT
mapit = Mapit(base_endpoint=endpoint)
popit = Popit(instance=settings.POPIT_INSTANCE)
kwargs['area_representatives'] = []
for mapit_id, mapit_data in mapit.areas(point='{0},{1}'.format(location.lon, location.lat),srid='4326').get().items():
kwargs['areas'].append(
(mapit_id, mapit_data)
)
area_uri = endpoint + 'area/' + str(mapit_id)
area_representatives = popit.search.memberships.get(params={
'q': 'area.identifier:"%s"' % area_uri
})
if area_representatives['total'] > 0:
kwargs['area_representatives'].append(
(area_representatives['result'][0]['organization'], mapit_data, area_representatives['result'])
)
return super(RepresentativesSearchView, self).get(request, *args, **kwargs)
class PersonDetail(TemplateView):
template_name = 'person-detail.html'
def get(self, request, *args, **kwargs):
person_id = kwargs.pop('id')
popit = Popit(instance=settings.POPIT_INSTANCE)
return super(PersonDetail, self).get(request, person=popit.persons.get(person_id, params={'embed': 'membership.organization'})['result'], *args, **kwargs)
class InstitutionDetail(TemplateView):
template_name = 'institution-detail.html'
def get(self, request, *args, **kwargs):
institution_id = kwargs.pop('id')
popit = Popit(instance=settings.POPIT_INSTANCE)
return super(InstitutionDetail, self).get(request, institution=popit.organizations.get(institution_id, params={'embed': 'membership.person'})['result'])
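# Illustrative sketch (editor addition): the 403 mentioned above is most
# likely Nominatim rejecting requests without a descriptive User-Agent; a
# plain requests call with an explicit header is one possible workaround.
# The header value and the use of the requests library here are assumptions,
# not part of this project.
def _example_geocode(address):
    import requests
    response = requests.get(
        'http://nominatim.openstreetmap.org/search',
        params={'q': address, 'format': 'json'},
        headers={'User-Agent': 'politikos-example/0.1'},
    )
    results = response.json()
    return results[0] if results else None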
|
bsd-3-clause
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/dbdefs/pdb.py
|
1
|
1473
|
# Copyright 2002 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from Bio.config.DBRegistry import CGIDB, DBGroup
from _support import *
from Martel import *
not_header_expr = AssertNot(Str("HEADER"))
pdb_rcsb_cgi = CGIDB(
name="pdb-rcsb-cgi",
cgi="http://www.rcsb.org/pdb/cgi/export.cgi",
url="XXX PLEASE FILL THIS IN XXX",
delay=5.0,
params=[("format", "PDB"),
("compression", "None")
],
key="pdbId",
# failure cases for 'file not found' make retrieval freeze up while
# Martel checks for them, for reasons we haven't been able to figure out,
# so instead we verify that the results look like PDB format
# failure_cases=[(has_str("File not found"), "ID does not exist")],
failure_cases=[(not_header_expr, "results do not look like PDB format")]
)
pdb_ebi_cgi = CGIDB(
name="pdb-ebi-cgi",
cgi="http://www.ebi.ac.uk/cgi-bin/dbfetch",
url="http://www.ebi.ac.uk/cgi-bin/dbfetch",
delay=5.0,
params=[("db", "PDB"),
("format", "default"), # also Fasta, bsml, agave available
("style", "raw"),
],
key="id",
failure_cases=[(not_header_expr, "results do not look like PDB format")]
)
pdb = DBGroup(
name="pdb",
behavior="serial"
)
pdb.add(pdb_rcsb_cgi)
pdb.add(pdb_ebi_cgi)
|
apache-2.0
|
cstan11/Sick-Beard
|
lib/chardet/langgreekmodel.py
|
2763
|
12628
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences: 1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = {
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = {
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "windows-1253"
}
# flake8: noqa
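# Illustrative sketch (editor addition): these model dictionaries are consumed
# internally by chardet's single-byte charset probers; from application code
# the usual entry point is chardet.detect(). The sample bytes below are an
# assumption for demonstration only, and the reported encoding and confidence
# depend on the input.
def _example_detect_greek():
    import chardet
    sample = u'\u039a\u03b1\u03bb\u03b7\u03bc\u03ad\u03c1\u03b1'.encode('iso-8859-7')
    return chardet.detect(sample)    # e.g. {'encoding': 'ISO-8859-7', 'confidence': ...}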
|
gpl-3.0
|
appurify/appurify-python
|
tests/test_client.py
|
2
|
31230
|
"""
Copyright 2013 Appurify, Inc
All rights reserved
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
To run tests:
from parent directory of tests:
python -m unittest tests.test_client
"""
import unittest
import json
import mock
import os
from appurify.client import AppurifyClient, AppurifyClientError
from appurify.constants import EXIT_CODE_APP_INSTALL_FAILED, EXIT_CODE_CLIENT_EXCEPTION, EXIT_CODE_OTHER_EXCEPTION
class TestObject(object):
pass
def mockRequestObj(response_obj, status_code=200):
r = TestObject()
r.headers = {'x-api-server-hostname': 'django-01'}
r.text = json.dumps(response_obj)
r.status_code = status_code
r.json = lambda: response_obj
return r
def mockRequestPost(url, data, files=None, verify=False, headers={'User-Agent': 'MockAgent'}):
if 'access_token/generate' in url:
return mockRequestObj({"meta": {"code": 200}, "response": {"access_token": "test_access_token", "ttl": 86400}})
if 'apps/upload' in url:
name = data.get('name', None)
return mockRequestObj({"meta": {"code": 200},
"response": {"_id_": None,
"uploaded_on": "2013-09-09T21:25:24Z",
"name": name,
"app_group_id": None,
"test_type": "test_test_type",
"size": None,
"app_group": "None",
"id": 12345,
"app_id": "test_app_id"}})
if 'tests/upload' in url:
return mockRequestObj({"meta": {"code": 200},
"response": {"uploaded_on": "2013-09-09T22:30:51Z",
"name": "use_bdd_tests.zip", "ttl": 86400,
"config": None,
"test_type": "uiautomation",
"test_id": "test_test_id",
"expired": False,
"id": 3456,
"size": 1326}})
if 'config/upload/' in url:
return mockRequestObj({"meta": {"code": 200},
"response": {"test_id": "test_test_id",
"config_id": 23456,
"conf_file": "appurify.conf"}})
if 'tests/run' in url:
return mockRequestObj({
"meta": {
"code": 200
},
"response": {
"id": 16282,
"test_type": "uiautomation",
"device_type": {
"device_type_id": 58,
"name": "5_NR",
"battery": False,
"brand": "iPhone",
"os_name": "iOS",
"os_version": "6.1.2",
"has_available_device": None,
"carrier": None,
"available_devices_count": None,
"busy_devices_count": None,
"all_devices_count": None,
"is_rooted": False,
"is_api": True,
"is_manual": False
},
"request_time": "2013-09-11T22:35:18.724Z",
"start_time": None,
"end_time": None,
"all_pass": False,
"run_id": "test_test_run_id",
"nbr_pass": None,
"nbr_fail": None,
"queued": True,
"app": 17967,
"version": {
"id": 17967,
"app": 7735,
"uploaded_on": "2013-09-11T22:35:18Z",
"description": None,
"size": 197072,
"icon_url": "/api/app/icon/?app_id=c5f361ebed16488dbf6b69be54f03e2c",
"app_id": "c5f361ebed16488dbf6b69be54f03e2c",
"app_type": "ios",
"web_app_url": None
},
"app_name": "use-bdd",
"device": "58 - iPhone 5_NR / iOS 6.1.2",
"source": 16177,
"config": {
"id": 1963,
"device": {
"id": 123,
"profiler": True,
"videocapture": True,
"import_photos": False,
"import_contacts": False,
"latitude": "37.777363",
"longitude": "-122.395894",
"packet_capture": True,
"free_memory": None,
"orientation": None,
"network": None
},
"framework": "{\"uiautomation\": {\"template\": \"Memory_Profiling_Template\"}}",
"test_timeout": 240,
"debug": False,
"keep_vm": False,
"device_types": [],
"vm_size": "small"
},
"status": "queueing",
"test_id": "test_test_id",
"app_id": "test_app_id",
"test_run_id": "test_test_run_id",
"queue_timeout_limit": 2
}
})
def mockRequestPostMulti(url, data, files=None, verify=False, headers={'User-Agent': 'MockAgent'}):
if 'tests/run' in url:
return mockRequestObj({
"meta": {
"code": 200
},
"response": {
"test_run_id": "test_test_run_id1,test_test_run_id2",
"test_id": "test_test_id",
"test_runs": [
{
"id": 16290,
"test_type": "uiautomation",
"device_type": {
"device_type_id": 58,
"name": "5_NR",
"battery": False,
"brand": "iPhone",
"os_name": "iOS",
"os_version": "6.1.2",
"has_available_device": None,
"carrier": None,
"available_devices_count": None,
"busy_devices_count": None,
"all_devices_count": None,
"is_rooted": False,
"is_api": True,
"is_manual": False
},
"request_time": "2013-09-12T22:01:48.594Z",
"start_time": None,
"end_time": None,
"all_pass": False,
"run_id": "test_test_run_id1",
"nbr_pass": None,
"nbr_fail": None,
"queued": True,
"app": 17975,
"version": {
"id": 17975,
"app": 7735,
"uploaded_on": "2013-09-12T21:59:07Z",
"description": None,
"size": 197072,
"icon_url": "/api/app/icon/?app_id=4858befdd9304984a171837c612746eb",
"app_id": "test_app_id",
"app_type": "ios",
"web_app_url": None
},
"app_name": "use-bdd",
"device": "58 - iPhone 5_NR / iOS 6.1.2",
"source": 16185,
"config": {
"id": 1971,
"device": {
"id": 123,
"profiler": True,
"videocapture": True,
"import_photos": False,
"import_contacts": False,
"latitude": "37.777363",
"longitude": "-122.395894",
"packet_capture": True,
"free_memory": None,
"orientation": None,
"network": None
},
"framework": "{\"uiautomation\": {\"template\": \"Memory_Profiling_Template\"}}",
"test_timeout": 240,
"debug": False,
"keep_vm": False,
"device_types": [],
"vm_size": "small",
"raw": "[uiautomation]\ntemplate=Memory_Profiling_Template\n\n[appurify]\nprofiler=1\npcap=1\nlatlng=37.777363,-122.395894\n\n"
},
"status": "queueing",
"test_id": "test_test_id",
"app_id": "test_app_id",
"test_run_id": "test_test_run_id1",
"device_type_id": 58,
"queue_timeout_limit": 2,
},
{
"id": 16291,
"test_type": "uiautomation",
"device_type": {
"device_type_id": 61,
"name": "5_NR",
"battery": False,
"brand": "iPhone",
"os_name": "iOS",
"os_version": "6.0.2",
"has_available_device": None,
"carrier": None,
"available_devices_count": None,
"busy_devices_count": None,
"all_devices_count": None,
"is_rooted": False,
"is_api": True,
"is_manual": False
},
"request_time": "2013-09-12T22:01:48.614Z",
"start_time": None,
"end_time": None,
"all_pass": False,
"run_id": "test_test_run_id2",
"nbr_pass": None,
"nbr_fail": None,
"queued": True,
"app": 17975,
"version": {
"id": 17975,
"app": 7735,
"uploaded_on": "2013-09-12T21:59:07Z",
"description": None,
"size": 197072,
"icon_url": "/api/app/icon/?app_id=4858befdd9304984a171837c612746eb",
"app_id": "test_app_id",
"app_type": "ios",
"web_app_url": None
},
"app_name": "use-bdd",
"device": "61 - iPhone 5_NR / iOS 6.0.2",
"source": 16185,
"config": {
"id": 1971,
"device": {
"id": 234,
"profiler": True,
"videocapture": True,
"import_photos": False,
"import_contacts": False,
"latitude": "37.777363",
"longitude": "-122.395894",
"packet_capture": True,
"free_memory": None,
"orientation": None,
"network": None
},
"framework": "{\"uiautomation\": {\"template\": \"Memory_Profiling_Template\"}}",
"test_timeout": 240,
"debug": False,
"keep_vm": False,
"device_types": [],
"vm_size": "small",
"raw": "[uiautomation]\ntemplate=Memory_Profiling_Template\n\n[appurify]\nprofiler=1\npcap=1\nlatlng=37.777363,-122.395894\n\n"
},
"status": "queueing",
"test_id": "test_test_id",
"app_id": "test_app_id",
"test_run_id": "test_test_run_id2",
"device_type_id": 61,
"queue_timeout_limit": 2,
}
],
"app_id": "test_app_id",
}
})
else:
raise Exception("Unrecognized url")
def mockRequestGet(url, params, verify=False, headers={'User-Agent': 'MockUserAgent'}):
if 'tests/check' in url:
if mockRequestGet.count <= 0:
mockRequestGet.count = mockRequestGet.count + 1
return mockRequestObj({"meta": {"code": 200},
"response": {"status": "in-progress",
"test_run_id": "test_test_run_id",
"test_config": "[uiautomation]\n\n[appurify]\nprofiler=1\npcap=1\n",
"device_type": "58 - iPhone 5_NR / iOS 6.1.2",
"device_type_id": 58}})
elif mockRequestGet.exception:
mockRequestGet.count = mockRequestGet.count + 1
return mockRequestObj({"meta": {"code": 200},
"response": {"status": "complete",
"detailed_status": "exception",
"results": {"exception": "-9999: Other exception",
"errors": None,
"url": "http://localhost/resource/tests/result/?run_id=dummy_test_run_id",
"number_passes": mockRequestGet.passes,
"number_fails": mockRequestGet.fails,
"pass": mockRequestGet.pass_val,
"output": ""},
"test_run_id": "test_test_run_id",
"device_type": "58 - iPhone 5_NR / iOS 6.1.2",
"device_type_id": 58}})
else:
mockRequestGet.count = mockRequestGet.count + 1
return mockRequestObj({"meta": {"code": 200},
"response": {"status": "complete",
"test_config": "[test_type]\nconfig",
"results": {"exception": None,
"errors": "",
"url": "http://localhost/resource/tests/result/?run_id=dummy_test_run_id",
"number_passes": mockRequestGet.passes,
"number_fails": mockRequestGet.fails,
"pass": mockRequestGet.pass_val,
"output": "test_run_output"},
"test_run_id": "test_test_run_id",
"device_type": "58 - iPhone 5_NR / iOS 6.1.2",
"device_type_id": 58}})
elif 'devices/list' in url:
return mockRequestObj({"meta": {"code": 200},
"response": [{"device_type_id": 137, "name": "5", "battery":False, "brand": "iPhone", "os_name": "iOS", "os_version": "7.0.4", "has_available_device":True, "available_devices_count": 1, "busy_devices_count": 0, "all_devices_count": 1, "is_rooted":False, "is_api":True, "is_manual":False, "device_family": "ios"},
{"device_type_id": 223, "name": "G Flex", "battery":False, "brand": "LG", "os_name": "Android", "os_version": "4.2.2", "has_available_device":True, "available_devices_count": 1, "busy_devices_count": 0, "all_devices_count": 1, "is_rooted":False, "is_api":True, "is_manual":False, "device_family": "android"}]})
mockRequestGet.count = 0
mockRequestGet.passes = 1
mockRequestGet.fails = 1
mockRequestGet.exception = 0
mockRequestGet.pass_val = False
def mockRequestGetPrimaryException(url, params, verify=False, headers={'User-Agent': 'MockUserAgent'}):
raise Exception("Mock Syntax Error")
class TestAuth(unittest.TestCase):
def setUp(self):
self.client = AppurifyClient(api_key="test_key", api_secret="test_secret")
@mock.patch("requests.post", mockRequestPost)
def testGetAccessToken(self):
client = AppurifyClient(api_key="test_key", api_secret="test_secret")
client.refreshAccessToken()
access_token = client.access_token
self.assertEqual(access_token, "test_access_token", "Should return proper access token on post")
def testGetAccessTokenPrePop(self):
client = AppurifyClient(access_token="Already_Set")
client.refreshAccessToken()
access_token = client.access_token
self.assertEqual(access_token, "Already_Set", "Should return access token when one is provided")
def testNoAuth(self):
client = AppurifyClient()
with self.assertRaises(AppurifyClientError):
""" Should error out on no auth data """
client.refreshAccessToken()
class TestUpload(unittest.TestCase):
@mock.patch("requests.post", mockRequestPost)
def testUploadAppNoSource(self):
client = AppurifyClient(access_token="authenticated", test_type='ios_webrobot')
app_id = client.uploadApp()
self.assertEqual(app_id, "test_app_id", "Should properly fetch web robot for app id")
@mock.patch("requests.post", mockRequestPost)
@mock.patch("requests.get", mockRequestGet)
def testUploadAppSource(self):
client = AppurifyClient(access_token="authenticated", app_src=__file__, app_src_type='raw', test_type='calabash', name="test_name", device_type_id="137")
app_id = client.uploadApp()
self.assertEqual(app_id, "test_app_id", "Should return the uploaded app id")
@mock.patch("requests.post", mockRequestPost)
def testUploadAppNoSourceError(self):
client = AppurifyClient(access_token="authenticated", app_src_type='raw', test_type='calabash')
with self.assertRaises(AppurifyClientError):
client.uploadApp()
@mock.patch("requests.post", mockRequestPost)
def testUploadTestNoSource(self):
client = AppurifyClient(access_token="authenticated", test_type='ios_webrobot')
app_id = client.uploadTest('test_app_id')
self.assertEqual(app_id, "test_test_id", "Should properly fetch web robot for app id")
@mock.patch("requests.post", mockRequestPost)
def testUploadTest(self):
client = AppurifyClient(access_token="authenticated", test_src=__file__, test_type="uiautomation", test_src_type='raw')
test_id = client.uploadTest('test_app_id')
self.assertEqual(test_id, "test_test_id", "Should return the uploaded test id")
@mock.patch("requests.post", mockRequestPost)
def testUploadTestNoSourceError(self):
client = AppurifyClient(access_token="authenticated", test_type='uiautomation')
with self.assertRaises(AppurifyClientError):
app_id = client.uploadTest('test_app_id')
@mock.patch("requests.post", mockRequestPost)
def testUploadConfig(self):
client = AppurifyClient(access_token="authenticated", test_type="ios_webrobot")
config_id = client.uploadConfig("test_id", config_src=__file__)
self.assertEqual(config_id, 23456, "Should properly fetch uploaded config id")
def testPrintConfig(self):
client = AppurifyClient(access_token="authenticated", test_type="ios_webrobot")
config = [{
"id": 1963,
"device": {
"id": 123,
"profiler": True,
"videocapture": True,
},
"framework": "{\"uiautomation\": {\"template\": \"Memory_Profiling_Template\"}}",
"test_timeout": 240,
"debug": False,
"keep_vm": False,
"device_types": [],
"vm_size": "small"
}]
client.printConfigs(config)
class TestRun(unittest.TestCase):
@mock.patch("requests.post", mockRequestPost)
def testRunTestSingle(self):
client = AppurifyClient(access_token="authenticated")
test_run_id, queue_timeout_limit, configs = client.runTest("app_id", "test_test_id")
self.assertEqual(test_run_id, "test_test_run_id", "Should get test_run_id when executing run")
self.assertEqual(len(configs), 1, "Should get config back for test run")
self.assertEqual(configs[0]['device']['id'], 123, "Sanity check parameters")
@mock.patch("requests.post", mockRequestPostMulti)
def testRunTestMulti(self):
client = AppurifyClient(access_token="authenticated")
test_run_id, queue_timeout_limit, configs = client.runTest("app_id", "test_test_id")
self.assertEqual(test_run_id, "test_test_run_id1,test_test_run_id2", "Should get test_run_ids when executing run")
self.assertEqual(len(configs), 2, "Should get config back for test run")
self.assertEqual(configs[0]['device']['id'], 123, "Sanity check parameters")
@mock.patch("requests.get", mockRequestGet)
def testPollTestResult(self):
mockRequestGet.count = 0
client = AppurifyClient(access_token="authenticated", timeout_sec=2, poll_every=0.1)
test_status_response = client.pollTestResult("test_test_run_id", 2)
self.assertEqual(test_status_response['status'], "complete", "Should poll until complete")
@mock.patch("requests.post", mockRequestPost)
@mock.patch("requests.get", mockRequestGet)
def testMainServerException(self):
mockRequestGet.count = 0
mockRequestGet.passes = 0
mockRequestGet.fails = 0
mockRequestGet.exception = 1
mockRequestGet.pass_val = False
client = AppurifyClient(api_key="test_key", api_secret="test_secret", test_type="uiautomation",
app_src=__file__, app_src_type='raw',
test_src=__file__, test_src_type='raw',
timeout_sec=2, poll_every=0.1, device_type_id="137")
result_code = client.main()
self.assertEqual(result_code, EXIT_CODE_OTHER_EXCEPTION, "Main should execute and return exception code")
@mock.patch("requests.post", mockRequestPost)
@mock.patch("requests.get", mockRequestGet)
def testMainFail(self):
mockRequestGet.count = 0
mockRequestGet.passes = 1
mockRequestGet.fails = 1
mockRequestGet.exception = 0
mockRequestGet.pass_val = False
client = AppurifyClient(api_key="test_key", api_secret="test_secret", test_type="uiautomation",
app_src=__file__, app_src_type='raw',
test_src=__file__, test_src_type='raw',
timeout_sec=2, poll_every=0.1, device_type_id="137")
result_code = client.main()
self.assertEqual(result_code, 1, "Main should execute and return fail code")
@mock.patch("requests.post", mockRequestPost)
@mock.patch("requests.get", mockRequestGet)
def testMainPass(self):
mockRequestGet.count = 0
mockRequestGet.passes = 2
mockRequestGet.fails = 0
mockRequestGet.exception = 0
mockRequestGet.pass_val = True
client = AppurifyClient(api_key="test_key", api_secret="test_secret", test_type="uiautomation",
app_src=__file__, app_src_type='raw',
test_src=__file__, test_src_type='raw',
timeout_sec=2, poll_every=0.1, device_type_id="137")
result_code = client.main()
self.assertEqual(result_code, 0, "Main should execute and return pass code")
@mock.patch("requests.post", mockRequestPost)
@mock.patch("requests.get", mockRequestGet)
def testMainPassUrl(self):
mockRequestGet.count = 0
mockRequestGet.passes = 2
mockRequestGet.fails = 0
mockRequestGet.exception = 0
mockRequestGet.pass_val = True
client = AppurifyClient(api_key="test_key", api_secret="test_secret", test_type="ios_webrobot",
app_src=None,
test_src=None,
url="www.yahoo.com",
timeout_sec=2, poll_every=0.1, device_type_id="137")
result_code = client.main()
self.assertEqual(result_code, 0, "Main should execute and return pass code")
@mock.patch("requests.get", mockRequestGet)
def testDefaultPollTimeout(self):
old_env = os.environ.get('APPURIFY_API_TIMEOUT', None)
try:
os.environ['APPURIFY_API_TIMEOUT'] = '0.2'
mockRequestGet.count = -20
client = AppurifyClient(access_token="authenticated", poll_every=0.1)
with self.assertRaises(AppurifyClientError):
client.pollTestResult("test_test_run_id", 0.2)
finally:
if old_env:
os.environ['APPURIFY_API_TIMEOUT'] = str(old_env)
else:
del os.environ['APPURIFY_API_TIMEOUT']
@mock.patch("requests.post", mockRequestPost)
@mock.patch("requests.get", mockRequestGet)
def testDefaultPollTimeoutCode(self):
old_env = os.environ.get('APPURIFY_API_TIMEOUT', None)
try:
os.environ['APPURIFY_API_TIMEOUT'] = '0.2'
mockRequestGet.count = -20
client = AppurifyClient(api_key="test_key", api_secret="test_secret", test_type="ios_webrobot",
app_src=None,
test_src=None,
url="www.yahoo.com",
poll_every=0.1,
device_type_id="137")
result_code = client.main()
self.assertEqual(result_code, 3, "Main should execute and return error code with default timeout")
finally:
if old_env:
os.environ['APPURIFY_API_TIMEOUT'] = str(old_env)
else:
del os.environ['APPURIFY_API_TIMEOUT']
@mock.patch("requests.get", mockRequestGet)
def testPollTimeout(self):
mockRequestGet.count = -20
client = AppurifyClient(access_token="authenticated", timeout_sec=0.2, poll_every=0.1, device_type_id="137")
with self.assertRaises(AppurifyClientError):
client.pollTestResult("test_test_run_id", 0.2)
@mock.patch("requests.post", mockRequestPost)
@mock.patch("requests.get", mockRequestGet)
def testPollTimeoutCode(self):
mockRequestGet.count = -20
client = AppurifyClient(api_key="test_key", api_secret="test_secret", test_type="ios_webrobot",
app_src=None,
test_src=None,
url="www.yahoo.com",
timeout_sec=0.2,
poll_every=0.1,
device_type_id="137")
result_code = client.main()
self.assertEqual(result_code, 3, "Main should execute and return error code")
@mock.patch("requests.post", mockRequestPost)
@mock.patch("requests.get", mockRequestGet)
def testGetExceptionExitCode(self):
mockRequestGet.count = -20
client = AppurifyClient(access_token="authenticated", timeout_sec=0.2, poll_every=0.1, device_type_id="137")
self.assertEqual(client.getExceptionExitCode([{"exception": "4007: Error installing the app: file does not contain AndroidManifest.xml\n (1)"}]), EXIT_CODE_APP_INSTALL_FAILED, "Should return correct exit code for matching exception")
self.assertEqual(client.getExceptionExitCode([{"exception": "-9999: no match anything"}]), EXIT_CODE_OTHER_EXCEPTION, "Should return correct exit code for no exception")
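# Illustrative sketch: the tests above all rely on the same technique, patching
# requests.post / requests.get with module-level fakes via mock.patch and then
# driving the client against them. A minimal, self-contained version of that
# pattern (fetch_status and FakeResponse are hypothetical names used only here):
#
#     import mock
#     import requests
#
#     def fetch_status(url):
#         return requests.get(url).status_code
#
#     class FakeResponse(object):
#         status_code = 200
#
#     with mock.patch("requests.get", lambda url, **kw: FakeResponse()):
#         assert fetch_status("http://example.invalid") == 200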
|
apache-2.0
|
mdesco/dipy
|
dipy/core/sphere.py
|
6
|
19979
|
from __future__ import division, print_function, absolute_import
__all__ = ['Sphere', 'HemiSphere', 'faces_from_sphere_vertices', 'unique_edges']
import numpy as np
import warnings
from ..utils.six.moves import xrange
from dipy.core.geometry import cart2sphere, sphere2cart, vector_norm
from dipy.core.onetime import auto_attr
from dipy.reconst.recspeed import remove_similar_vertices
def _all_specified(*args):
for a in args:
if a is None:
return False
return True
def _some_specified(*args):
for a in args:
if a is not None:
return True
return False
def faces_from_sphere_vertices(vertices):
"""
Triangulate a set of vertices on the sphere.
Parameters
----------
vertices : (M, 3) ndarray
XYZ coordinates of vertices on the sphere.
Returns
-------
faces : (N, 3) ndarray
Indices into vertices; forms triangular faces.
"""
from scipy.spatial import Delaunay
faces = Delaunay(vertices).convex_hull
if len(vertices) < 2**16:
return np.asarray(faces, np.uint16)
else:
return faces
def unique_edges(faces, return_mapping=False):
"""Extract all unique edges from given triangular faces.
Parameters
----------
faces : (N, 3) ndarray
Vertex indices forming triangular faces.
return_mapping : bool
If true, a mapping to the edges of each face is returned.
Returns
-------
edges : (N, 2) ndarray
Unique edges.
mapping : (N, 3)
For each face, [x, y, z], a mapping to its edges [a, b, c].
::
y
/\
/ \
a/ \b
/ \
/ \
/__________\
x c z
"""
faces = np.asarray(faces)
edges = np.concatenate([faces[:, 0:2], faces[:, 1:3], faces[:, ::2]])
if return_mapping:
ue, inverse = unique_sets(edges, return_inverse=True)
return ue, inverse.reshape((3, -1)).T
else:
return unique_sets(edges)
def unique_sets(sets, return_inverse=False):
"""Remove duplicate sets.
Parameters
----------
sets : array (N, k)
N sets of size k.
return_inverse : bool
If True, also returns the indices of unique_sets that can be used
to reconstruct `sets` (the original ordering of each set may not be
preserved).
Returns
-------
unique_sets : array
Unique sets.
inverse : array (N,)
The indices to reconstruct `sets` from `unique_sets`.
"""
sets = np.sort(sets, 1)
order = np.lexsort(sets.T)
sets = sets[order]
flag = np.ones(len(sets), 'bool')
flag[1:] = (sets[1:] != sets[:-1]).any(-1)
uniqsets = sets[flag]
if return_inverse:
inverse = np.empty_like(order)
inverse[order] = np.arange(len(order))
index = flag.cumsum() - 1
return uniqsets, index[inverse]
else:
return uniqsets
class Sphere(object):
"""Points on the unit sphere.
The sphere can be constructed using one of three conventions::
Sphere(x, y, z)
Sphere(xyz=xyz)
Sphere(theta=theta, phi=phi)
Parameters
----------
x, y, z : 1-D array_like
Vertices as x-y-z coordinates.
theta, phi : 1-D array_like
Vertices as spherical coordinates. Theta and phi are the inclination
and azimuth angles respectively.
xyz : (N, 3) ndarray
Vertices as x-y-z coordinates.
faces : (N, 3) ndarray
Indices into vertices that form triangular faces. If unspecified,
the faces are computed using a Delaunay triangulation.
edges : (N, 2) ndarray
Edges between vertices. If unspecified, the edges are
derived from the faces.
"""
def __init__(self, x=None, y=None, z=None,
theta=None, phi=None,
xyz=None,
faces=None, edges=None):
all_specified = _all_specified(x, y, z) + _all_specified(xyz) + \
_all_specified(theta, phi)
one_complete = _some_specified(x, y, z) + _some_specified(xyz) + \
_some_specified(theta, phi)
if not (all_specified == 1 and one_complete == 1):
raise ValueError("Sphere must be constructed using either "
"(x,y,z), (theta, phi) or xyz.")
if edges is not None and faces is None:
raise ValueError("Either specify both faces and "
"edges, only faces, or neither.")
if edges is not None:
self.edges = np.asarray(edges)
if faces is not None:
self.faces = np.asarray(faces)
if theta is not None:
self.theta = np.array(theta, copy=False, ndmin=1)
self.phi = np.array(phi, copy=False, ndmin=1)
return
if xyz is not None:
xyz = np.asarray(xyz)
x, y, z = xyz.T
x, y, z = (np.asarray(t) for t in (x, y, z))
r, self.theta, self.phi = cart2sphere(x, y, z)
if not np.allclose(r, 1):
warnings.warn("Vertices are not on the unit sphere.")
@auto_attr
def vertices(self):
return np.column_stack(sphere2cart(1, self.theta, self.phi))
@property
def x(self):
return self.vertices[:, 0]
@property
def y(self):
return self.vertices[:, 1]
@property
def z(self):
return self.vertices[:, 2]
@auto_attr
def faces(self):
faces = faces_from_sphere_vertices(self.vertices)
return faces
@auto_attr
def edges(self):
return unique_edges(self.faces)
def subdivide(self, n=1):
"""Subdivides each face of the sphere into four new faces.
New vertices are created at a, b, and c. Then each face [x, y, z] is
divided into faces [x, a, c], [y, a, b], [z, b, c], and [a, b, c].
::
y
/\
/ \
a/____\b
/\ /\
/ \ / \
/____\/____\
x c z
Parameters
----------
n : int, optional
The number of subdivisions to perform.
Returns
-------
new_sphere : Sphere
The subdivided sphere.
"""
vertices = self.vertices
faces = self.faces
for i in xrange(n):
edges, mapping = unique_edges(faces, return_mapping=True)
new_vertices = vertices[edges].sum(1)
new_vertices /= vector_norm(new_vertices, keepdims=True)
mapping += len(vertices)
vertices = np.vstack([vertices, new_vertices])
x, y, z = faces.T
a, b, c = mapping.T
face1 = np.column_stack([x, a, c])
face2 = np.column_stack([y, b, a])
face3 = np.column_stack([z, c, b])
face4 = mapping
faces = np.concatenate([face1, face2, face3, face4])
if len(vertices) < 2**16:
faces = np.asarray(faces, dtype='uint16')
return Sphere(xyz=vertices, faces=faces)
def find_closest(self, xyz):
"""
Find the index of the vertex in the Sphere closest to the input vector
Parameters
----------
xyz : array-like, 3 elements
A unit vector
Returns
-------
idx : int
The index into the Sphere.vertices array that gives the closest
vertex (in angle).
"""
cos_sim = np.dot(self.vertices, xyz)
return np.argmax(cos_sim)
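# Illustrative sketch: the three construction conventions described in the Sphere
# docstring produce the same vertices. For a single point on the +z axis (values
# chosen purely for illustration):
#
#     import numpy as np
#     s1 = Sphere(x=[0.], y=[0.], z=[1.])
#     s2 = Sphere(xyz=np.array([[0., 0., 1.]]))
#     s3 = Sphere(theta=[0.], phi=[0.])  # inclination 0 is the +z axis
#     # np.allclose(s1.vertices, s2.vertices) and np.allclose(s2.vertices, s3.vertices)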
class HemiSphere(Sphere):
"""Points on the unit sphere.
A HemiSphere is similar to a Sphere but it takes antipodal symmetry into
account. Antipodal symmetry means that point v on a HemiSphere is the same
as the point -v. Duplicate points are discarded when constructing a
HemiSphere (including antipodal duplicates). `edges` and `faces` are
remapped to the remaining points as closely as possible.
The HemiSphere can be constructed using one of three conventions::
HemiSphere(x, y, z)
HemiSphere(xyz=xyz)
HemiSphere(theta=theta, phi=phi)
Parameters
----------
x, y, z : 1-D array_like
Vertices as x-y-z coordinates.
theta, phi : 1-D array_like
Vertices as spherical coordinates. Theta and phi are the inclination
and azimuth angles respectively.
xyz : (N, 3) ndarray
Vertices as x-y-z coordinates.
faces : (N, 3) ndarray
Indices into vertices that form triangular faces. If unspecified,
the faces are computed using a Delaunay triangulation.
edges : (N, 2) ndarray
Edges between vertices. If unspecified, the edges are
derived from the faces.
tol : float
Angle in degrees. Vertices that are less than tol degrees apart are
treated as duplicates.
See Also
--------
Sphere
"""
def __init__(self, x=None, y=None, z=None,
theta=None, phi=None,
xyz=None,
faces=None, edges=None, tol=1e-5):
"""Create a HemiSphere from points"""
sphere = Sphere(x=x, y=y, z=z, theta=theta, phi=phi, xyz=xyz)
uniq_vertices, mapping = remove_similar_vertices(sphere.vertices, tol,
return_mapping=True)
uniq_vertices *= 1 - 2*(uniq_vertices[:, -1:] < 0)
if faces is not None:
faces = np.asarray(faces)
faces = unique_sets(mapping[faces])
if edges is not None:
edges = np.asarray(edges)
edges = unique_sets(mapping[edges])
Sphere.__init__(self, xyz=uniq_vertices, edges=edges, faces=faces)
@classmethod
def from_sphere(klass, sphere, tol=1e-5):
"""Create instance from a Sphere"""
return klass(theta=sphere.theta, phi=sphere.phi,
edges=sphere.edges, faces=sphere.faces, tol=tol)
def mirror(self):
"""Create a full Sphere from a HemiSphere"""
n = len(self.vertices)
vertices = np.vstack([self.vertices, -self.vertices])
edges = np.vstack([self.edges, n + self.edges])
_switch_vertex(edges[:,0], edges[:,1], vertices)
faces = np.vstack([self.faces, n + self.faces])
_switch_vertex(faces[:,0], faces[:,1], vertices)
_switch_vertex(faces[:,0], faces[:,2], vertices)
return Sphere(xyz=vertices, edges=edges, faces=faces)
@auto_attr
def faces(self):
vertices = np.vstack([self.vertices, -self.vertices])
faces = faces_from_sphere_vertices(vertices)
return unique_sets(faces % len(self.vertices))
def subdivide(self, n=1):
"""Create a more subdivided HemiSphere
See Sphere.subdivide for full documentation.
"""
sphere = self.mirror()
sphere = sphere.subdivide(n)
return HemiSphere.from_sphere(sphere)
def find_closest(self, xyz):
"""
Find the index of the vertex in the Sphere closest to the input vector,
taking into account antipodal symmetry
Parameters
----------
xyz : array-like, 3 elements
A unit vector
Returns
-------
idx : int
The index into the Sphere.vertices array that gives the closest
vertex (in angle).
"""
cos_sim = abs(np.dot(self.vertices, xyz))
return np.argmax(cos_sim)
def _switch_vertex(index1, index2, vertices):
"""When we mirror an edge (a, b). We can either create (a, b) and (a', b')
OR (a, b') and (a', b). The angles of edges (a, b) and (a, b') are
supplementary, so we choose the two new edges such that their angles are
less than 90 degrees.
"""
n = len(vertices)
A = vertices[index1]
B = vertices[index2]
is_far = (A * B).sum(-1) < 0
index2[is_far] += n // 2  # integer division: n is always even here (vertices were stacked with their antipodes)
index2 %= n
def _get_forces(charges):
r"""Given a set of charges on the surface of the sphere gets total force
those charges exert on each other.
The force exerted by one charge on another is given by Coulomb's law. For
this simulation we use charges of equal magnitude so this force can be
written as $\vec{r}/r^3$, up to a constant factor, where $\vec{r}$ is the
separation of the two charges and $r$ is the magnitude of $\vec{r}$. Forces
are additive so the total force on each of the charges is the sum of the
force exerted by each other charge in the system. Charges do not exert a
force on themselves. The electric potential can similarly be written as
$1/r$ and is also additive.
"""
all_charges = np.concatenate((charges, -charges))
all_charges = all_charges[:, None]
r = charges - all_charges
r_mag = np.sqrt((r*r).sum(-1))[:, :, None]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
force = r / r_mag**3
potential = 1. / r_mag
d = np.arange(len(charges))
force[d,d] = 0
force = force.sum(0)
force_r_comp = (charges*force).sum(-1)[:, None]
f_theta = force - force_r_comp*charges
potential[d,d] = 0
potential = 2*potential.sum()
return f_theta, potential
def disperse_charges(hemi, iters, const=.2):
"""Models electrostatic repulsion on the unit sphere
Places charges on a sphere and simulates the repulsive forces felt by each
one. Allows the charges to move for some number of iterations and returns
their final location as well as the total potential of the system at each
step.
Parameters
----------
hemi : HemiSphere
Points on a unit sphere.
iters : int
Number of iterations to run.
const : float
Using a smaller const could provide a more accurate result, but will
need more iterations to converge.
Returns
-------
hemi : HemiSphere
Distributed points on a unit sphere.
potential : ndarray
The electrostatic potential at each iteration. This can be useful to
check if the repulsion converged to a minimum.
Notes
-----
This function is meant to be used with diffusion imaging, so antipodal
symmetry is assumed. Therefore each charge must not only be unique, but if
there is a charge at +x, there cannot be a charge at -x. These are treated
as the same location and because the distance between the two charges will
be zero, the result will be unstable.
"""
if not isinstance(hemi, HemiSphere):
raise ValueError("expecting HemiSphere")
charges = hemi.vertices
forces, v = _get_forces(charges)
force_mag = np.sqrt((forces*forces).sum())
const = const / force_mag.max()
potential = np.empty(iters)
v_min = v
for ii in xrange(iters):
new_charges = charges + forces * const
norms = np.sqrt((new_charges**2).sum(-1))
new_charges /= norms[:, None]
new_forces, v = _get_forces(new_charges)
if v <= v_min:
charges = new_charges
forces = new_forces
potential[ii] = v_min = v
else:
const /= 2.
potential[ii] = v_min
return HemiSphere(xyz=charges), potential
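# Illustrative sketch: typical use of disperse_charges is to start from arbitrary
# directions and let the simulated repulsion spread them out (the point count and
# iteration count here are arbitrary):
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     hemi = HemiSphere(theta=np.pi * rng.rand(32), phi=2 * np.pi * rng.rand(32))
#     dispersed, potential = disperse_charges(hemi, iters=100)
#     # `potential` should be non-increasing as the points spread out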
def interp_rbf(data, sphere_origin, sphere_target,
function='multiquadric', epsilon=None, smooth=0,
norm = "euclidean_norm"):
"""Interpolate data on the sphere, using radial basis functions.
Parameters
----------
data : (N,) ndarray
Function values on the unit sphere.
sphere_origin : Sphere
Positions of data values.
sphere_target : Sphere
M target positions for which to interpolate.
function : {'multiquadric', 'inverse', 'gaussian'}
Radial basis function.
epsilon : float
Radial basis function spread parameter.
smooth : float
Values greater than zero increase the smoothness of the
approximation, with 0 (the default) giving pure interpolation.
norm : str
A string indicating the function that returns the
"distance" between two points.
'angle' - The angle between two vectors
'euclidean_norm' - The Euclidean distance
Returns
-------
v : (M,) ndarray
Interpolated values.
See Also
--------
scipy.interpolate.Rbf
"""
from scipy.interpolate import Rbf
def angle(x1, x2):
xx = np.arccos((x1 * x2).sum(axis=0))
xx[np.isnan(xx)] = 0
return xx
def euclidean_norm(x1, x2):
return np.sqrt(((x1 - x2)**2).sum(axis=0))
if norm is "angle":
norm = angle
elif norm is "euclidean_norm":
norm = euclidean_norm
# Workaround for bug in SciPy that doesn't allow
# specification of epsilon None
if epsilon is not None:
kwargs = {'function': function,
'epsilon': epsilon,
'smooth' : smooth,
'norm' : norm}
else:
kwargs = {'function': function,
'smooth': smooth,
'norm' : norm}
rbfi = Rbf(sphere_origin.x, sphere_origin.y, sphere_origin.z, data,
**kwargs)
return rbfi(sphere_target.x, sphere_target.y, sphere_target.z)
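# Illustrative sketch: interp_rbf resamples a function defined on one set of
# directions onto another. Using the octahedron defined at the end of this module
# as the coarse grid (requires scipy; the function being interpolated is arbitrary):
#
#     coarse = unit_octahedron
#     fine = unit_octahedron.subdivide(2)
#     data = coarse.z ** 2
#     values = interp_rbf(data, coarse, fine, norm="euclidean_norm")
#     # `values` holds one interpolated value per vertex of `fine`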
def euler_characteristic_check(sphere, chi=2):
r"""Checks the euler characteristic of a sphere
If $f$ = number of faces, $e$ = number_of_edges and $v$ = number of
vertices, the Euler formula says $f-e+v = 2$ for a mesh on a sphere. More
generally, whether $f -e + v == \chi$ where $\chi$ is the Euler
characteristic of the mesh.
- Open chain (track) has $\chi=1$
- Closed chain (loop) has $\chi=0$
- Disk has $\chi=1$
- Sphere has $\chi=2$
- HemiSphere has $\chi=1$
Parameters
----------
sphere : Sphere
A Sphere instance with vertices, edges and faces attributes.
chi : int, optional
The Euler characteristic of the mesh to be checked
Returns
-------
check : bool
True if the mesh has Euler characteristic $\chi$
Examples
--------
>>> euler_characteristic_check(unit_octahedron)
True
>>> hemisphere = HemiSphere.from_sphere(unit_icosahedron)
>>> euler_characteristic_check(hemisphere, chi=1)
True
"""
v = sphere.vertices.shape[0]
e = sphere.edges.shape[0]
f = sphere.faces.shape[0]
return (f - e + v) == chi
octahedron_vertices = np.array(
[[ 1.0 , 0.0, 0.0],
[-1.0, 0.0, 0.0],
[ 0.0, 1.0, 0.0],
[ 0.0, -1.0, 0.0],
[ 0.0, 0.0, 1.0],
[ 0.0, 0.0, -1.0],
])
octahedron_faces = np.array(
[[0, 4, 2],
[1, 5, 3],
[4, 2, 1],
[5, 3, 0],
[1, 4, 3],
[0, 5, 2],
[0, 4, 3],
[1, 5, 2],
], dtype='uint16')
t = (1 + np.sqrt(5)) / 2
icosahedron_vertices = np.array(
[[ t, 1, 0], # 0
[ -t, 1, 0], # 1
[ t, -1, 0], # 2
[ -t, -1, 0], # 3
[ 1, 0, t], # 4
[ 1, 0, -t], # 5
[ -1, 0, t], # 6
[ -1, 0, -t], # 7
[ 0, t, 1], # 8
[ 0, -t, 1], # 9
[ 0, t, -1], # 10
[ 0, -t, -1], # 11
])
icosahedron_vertices /= vector_norm(icosahedron_vertices, keepdims=True)
icosahedron_faces = np.array(
[[ 8, 4, 0],
[ 2, 5, 0],
[ 2, 5, 11],
[ 9, 2, 11],
[ 2, 4, 0],
[ 9, 2, 4],
[10, 8, 1],
[10, 8, 0],
[10, 5, 0],
[ 6, 3, 1],
[ 9, 6, 3],
[ 6, 8, 1],
[ 6, 8, 4],
[ 9, 6, 4],
[ 7, 10, 1],
[ 7, 10, 5],
[ 7, 3, 1],
[ 7, 3, 11],
[ 9, 3, 11],
[ 7, 5, 11],
], dtype='uint16')
unit_octahedron = Sphere(xyz=octahedron_vertices, faces=octahedron_faces)
unit_icosahedron = Sphere(xyz=icosahedron_vertices, faces=icosahedron_faces)
hemi_icosahedron = HemiSphere.from_sphere(unit_icosahedron)
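# Illustrative sketch: quick sanity checks tying the helpers above together,
# runnable in an interpreter once this module is imported:
#
#     sphere = unit_icosahedron.subdivide(2)
#     print(sphere.vertices.shape, sphere.faces.shape)  # (162, 3) (320, 3)
#     print(euler_characteristic_check(sphere))         # True
#     hemi = HemiSphere.from_sphere(sphere)
#     print(euler_characteristic_check(hemi, chi=1))    # True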
|
bsd-3-clause
|
RanadeepPolavarapu/kuma
|
vendor/packages/nose/plugins/skip.py
|
80
|
2142
|
"""
This plugin installs a SKIP error class for the SkipTest exception.
When SkipTest is raised, the exception will be logged in the skipped
attribute of the result, 'S' or 'SKIP' (verbose) will be output, and
the exception will not be counted as an error or failure. This plugin
is enabled by default but may be disabled with the ``--no-skip`` option.
"""
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
# on SkipTest:
# - unittest SkipTest is first preference, but it's only available
# for >= 2.7
# - unittest2 SkipTest is second preference for older pythons. This
# mirrors logic for choosing SkipTest exception in testtools
# - if none of the above, provide custom class
try:
from unittest.case import SkipTest
except ImportError:
try:
from unittest2.case import SkipTest
except ImportError:
class SkipTest(Exception):
"""Raise this exception to mark a test as skipped.
"""
pass
class Skip(ErrorClassPlugin):
"""
Plugin that installs a SKIP error class for the SkipTest
exception. When SkipTest is raised, the exception will be logged
in the skipped attribute of the result, 'S' or 'SKIP' (verbose)
will be output, and the exception will not be counted as an error
or failure.
"""
enabled = True
skipped = ErrorClass(SkipTest,
label='SKIP',
isfailure=False)
def options(self, parser, env):
"""
Add my options to command line.
"""
env_opt = 'NOSE_WITHOUT_SKIP'
parser.add_option('--no-skip', action='store_true',
dest='noSkip', default=env.get(env_opt, False),
help="Disable special handling of SkipTest "
"exceptions.")
def configure(self, options, conf):
"""
Configure plugin. Skip plugin is enabled by default.
"""
if not self.can_configure:
return
self.conf = conf
disable = getattr(options, 'noSkip', False)
if disable:
self.enabled = False
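# Illustrative sketch: with this plugin enabled (the default), a test can opt out
# at runtime by raising SkipTest, and nose reports it as 'S'/'SKIP' instead of an
# error:
#
#     from nose.plugins.skip import SkipTest
#
#     def test_optional_feature():
#         raise SkipTest("optional dependency not installed")
#
# Running ``nosetests --no-skip`` (or setting NOSE_WITHOUT_SKIP in the
# environment) disables the special handling again.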
|
mpl-2.0
|
arizona-phonological-imaging-lab/Autotrace
|
matlab-version/fixImages.py
|
3
|
2423
|
import os, sys, subprocess
import Image
import pygtk
pygtk.require('2.0')
import gtk, gobject
import gtk.glade
class ImageFixer:
def __init__(self, filenames):
self.gladefile = "LinguaViewer.glade"
self.wTree = gtk.glade.XML(self.gladefile, "resize")
self.window = self.wTree.get_widget("resize")
self.window.set_size_request(400, 100)
self.window.connect("destroy", self.destroy_progress)
self.pbar = self.wTree.get_widget("progressbar1")
self.pbar.show()
self.val = 0.0
self.frac = 1.0/len(filenames)
self.pbar.set_fraction(self.val)
result = self.check(filenames)
if result == gtk.RESPONSE_OK:
task = self.fix(filenames)
gobject.idle_add(task.next)
else:
self.window.destroy()
def check(self, filenames):
#check whether we need to do correction
badcount = 0
for i in filenames:
im = Image.open(i)
if (im.size[0] != 720): #or (im.size[1] != 480):
badcount += 1
break
if badcount > 0:
dlg = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING,
gtk.BUTTONS_OK_CANCEL,
"It appears that 1 or more images need to be resized.\nResizing the images will overwrite the originals. Continue?")
result = dlg.run()
dlg.destroy()
else:
result = gtk.RESPONSE_CANCEL
return result
def fix(self, files):
l = len(files)
c = 0
for j in files:
im = Image.open(j)
if (im.size[0] != 720) or (im.size[1] != 480):
cmd = ['convert', j, '-shave', '126x0', j]
p = subprocess.Popen(cmd)
p.wait()
cmd = ['convert', j, '-chop', '12x0', j]
p = subprocess.Popen(cmd)
p.wait()
cmd = ['convert', j, '-resize', '720x480!', j]
p = subprocess.Popen(cmd)
#p.wait()
self.val += self.frac
self.pbar.set_fraction(self.val)
c += 1
if c < l:
yield True
else:
yield False
def destroy_progress(self, event):
self.window.destroy()
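# Illustrative sketch: for a single file, the three subprocess calls in fix()
# amount to running ImageMagick as follows (shave 126 columns from each side,
# chop a further 12 columns, then force the result to exactly 720x480):
#
#     convert frame.jpg -shave 126x0 frame.jpg
#     convert frame.jpg -chop 12x0 frame.jpg
#     convert frame.jpg -resize 720x480! frame.jpg
#
# The trailing '!' tells convert to ignore the aspect ratio; 'frame.jpg' is a
# placeholder file name.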
|
mit
|
pacoqueen/bbinn
|
formularios/transferencias.py
|
1
|
25006
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# ([email protected], [email protected]) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## transferencias.py -- Create, edit or delete bank transfers
###################################################################
## NOTES:
## The base class is Pagos (payments), but only those that are
## transfers are taken into account.
## ----------------------------------------------------------------
##
###################################################################
## Changelog:
## 22 February 2007 -> Started
##
###################################################################
import sys, os
from ventana import Ventana
import utils
import pygtk
pygtk.require('2.0')
import gtk, gtk.glade, time, mx, mx.DateTime
try:
import pclases
from seeker import VentanaGenerica
except ImportError:
sys.path.append(os.path.join('..', 'framework'))
import pclases
from seeker import VentanaGenerica
from utils import _float as float
class Transferencias(Ventana, VentanaGenerica):
def __init__(self, objeto = None, usuario = None):
"""
Constructor. objeto may be a pclases object with which to start
the window (instead of the first row of the table, which is the
one shown by default).
"""
self.usuario = usuario
self.clase = pclases.Pago
self.dic_campos = {"importe": "e_importe",
"proveedorID": "cbe_proveedor",
"cuentaOrigenID": "cbe_origen",
"cuentaDestinoID": "cbe_destino",
"fecha": "e_fecha",
"conceptoLibre": "e_concepto",
}
Ventana.__init__(self, 'transferencias.glade', objeto)
connections = {'b_salir/clicked': self.salir,
'b_nuevo/clicked': self.nuevo,
'b_borrar/clicked': self.borrar,
'b_actualizar/clicked': self.actualizar_ventana,
'b_guardar/clicked': self.guardar,
'b_buscar/clicked': self.buscar,
'b_fecha/clicked': self.set_fecha,
'b_factura/clicked': self.set_factura,
'b_imprimir/clicked': self.imprimir,
'b_nuevo_destino/clicked': self.crear_nueva_cuenta_destino
}
self.add_connections(connections)
self.inicializar_ventana()
if self.objeto == None:
self.ir_a_primero()
else:
self.ir_a(objeto)
gtk.main()
def imprimir(self, boton):
"""
Prints a fax with the transfer information.
Asks the user for a signer. The rest of the data
is taken from the database.
"""
sys.path.append(os.path.join("..", "informes"))
from geninformes import fax_transferencia
from informes import abrir_pdf
firmantes = {1: "Otro (texto libre)",
2: "D. Enrique Román Corzo",
3: "D. Enrique Figueroa Yáñez",
4: "D. Enrique Mozo del Río",
5: "D. Fernando Guijarro Lozano"} # TODO: HARCODED
claves = firmantes.keys()
claves.sort()
firmado = utils.dialogo_combo(titulo = "FIRMANTE",
texto = 'Seleccione un firmante o elija "otro" y pulse\n«Aceptar» para escribir uno distinto:',
ops = ([(k, firmantes[k]) for k in claves]),
padre = self.wids['ventana'])
if firmado == 1:
firmado = utils.dialogo_entrada(titulo = "FIRMANTE",
texto = "Introduzca el nombre que aparecerá en la firma:",
padre = self.wids['ventana'])
elif firmado != None:
firmado = firmantes[firmado]
if firmado != None:
try:
e = pclases.DatosDeLaEmpresa.select()[0]
t = self.objeto
o = t.cuentaOrigen
d = t.cuentaDestino
p = t.proveedor
empresa = o.banco
contacto = o.contacto
fax = o.fax
telefono = o.telefono
de = e.nombreContacto
asunto = "Transferencia"
fecha = utils.str_fecha(t.fecha)
beneficiario = p.nombre
banco = d.banco
#if p.es_extranjero():
# cuenta = "%s %s" % (d.iban, d.swif)
#else:
# cuenta = d.cuenta
cuenta = d.cuenta
porcuenta = e.nombre
ccc = o.ccc
concepto = t.concepto
importe = "%s €" % (utils.float2str(t.importe))
swift = d.swif
iban = d.iban
observaciones = d.observaciones
conceptoLibre = t.conceptoLibre
except AttributeError, msg:
utils.dialogo_info(titulo = "ERROR AL GENERAR FAX", texto = "No se encontraron algunos datos.\n\nVerifique la información y vuelva a intentarlo.", padre = self.wids['ventana'])
self.logger.error("transferencias.py::imprimir -> AttributeError: %s" % msg)
except IndexError, msg:
utils.dialogo_info(titulo = "ERROR AL RECUPERAR DATOS DE LA EMPRESA", texto = "No se encontraron los datos de la empresa.\n\nCompruebe que existe una empresa en la tabla «datos_de_la_empresa».\n\n\n(Contacte con el administrador en caso de duda.)", padre = self.wids['ventana'])
self.logger.error("transferencias.py::imprimir -> IndexError: %s" % msg)
else:
abrir_pdf(fax_transferencia(empresa,
contacto,
fax,
telefono,
de,
asunto,
fecha,
beneficiario,
banco,
cuenta,
porcuenta,
ccc,
concepto,
importe,
firmado,
swift,
iban,
observaciones,
conceptoLibre))
def ir_a_primero(self):
"""
Overrides the seeker method.
Goes to the most recent transfer in the database.
"""
anterior = self.objeto
objeto = None
ts = pclases.Pago.select(orderBy = "-id")
for t in ts:
if t.es_transferencia():
objeto = t
break
if objeto != None:
if self.objeto != None:
self.objeto.notificador.desactivar()
self.objeto = objeto
self.objeto.notificador.activar(self.aviso_actualizacion)
self.actualizar_ventana(objeto_anterior = anterior)
self.activar_widgets(False) # Disabled by default, so the user does not mistakenly overwrite it with a new transfer.
def crear_nueva_cuenta_destino(self, boton):
"""
Creates a new destination account through the destination accounts window
and inserts it into the ComboBoxEntry.
"""
if self.objeto != None:
import cuentas_destino
nueva_cuenta_destino = pclases.CuentaDestino(proveedor = self.objeto.proveedor,
nombre = "Nueva cuenta de %s" % (self.objeto.proveedor and self.objeto.proveedor.nombre or "?"))
utils.dialogo_info(titulo = "NUEVA CUENTA CREADA",
texto = """
A continuación complete la información de la nueva cuenta del proveedor
y cierre la ventana que aparecerá.
Después podrá seleccionarla en la ventana de transferencias.
""",
padre = self.wids['ventana'])
v = cuentas_destino.CuentasDestino(objeto = nueva_cuenta_destino, usuario = self.usuario)
self.actualizar_ventana()
def set_factura(self, boton):
"""
Looks up a purchase invoice in the database and
stores it in the active object before reloading
the window.
"""
factura = self.buscar_factura() # If a LOGIC is what is being paid, it should be done from the vencimientos_pendientes window.
if factura != None:
self.objeto.importe = factura.get_importe_primer_vencimiento_pendiente()
self.objeto.facturaCompra = factura
self.objeto.proveedor = factura.proveedor
self.actualizar_ventana()
def set_fecha(self, boton):
"""
Puts the date selected from a calendar dialog into the entry.
"""
self.wids['e_fecha'].set_text(utils.str_fecha(utils.mostrar_calendario(fecha_defecto = self.objeto and self.objeto.fecha or None, padre = self.wids['ventana'])))
def es_diferente(self):
"""
Returns True if any value in the window differs from
the values of the object.
"""
if self.objeto == None:
igual = True
else:
igual = self.objeto != None
for colname in self.dic_campos:
col = self.clase._SO_columnDict[colname]
try:
valor_ventana = self.leer_valor(col, self.dic_campos[colname])
except (ValueError, mx.DateTime.RangeError, TypeError):
igual = False
valor_objeto = getattr(self.objeto, col.name)
if isinstance(col, pclases.SODateCol):
valor_objeto = utils.abs_mxfecha(valor_objeto)
igual = igual and (valor_ventana == valor_objeto)
if not igual:
break
return not igual
def inicializar_ventana(self):
"""
Initializes the window controls, setting their default
values, disabling the unnecessary ones, filling the
combo boxes, formatting the TreeView -if there is one-...
"""
# Initially NOTHING is shown. The user is only given the
# option to search or create a new record.
self.activar_widgets(False)
self.wids['b_actualizar'].set_sensitive(False)
self.wids['b_guardar'].set_sensitive(False)
self.wids['b_nuevo'].set_sensitive(True)
self.wids['b_buscar'].set_sensitive(True)
# Initialization of the remaining widgets:
utils.rellenar_lista(self.wids['cbe_proveedor'], [(p.id, p.nombre) for p in pclases.Proveedor.select(orderBy = "nombre")])
utils.rellenar_lista(self.wids['cbe_origen'], [(p.id, p.nombre) for p in pclases.CuentaOrigen.select(orderBy = "nombre")])
utils.rellenar_lista(self.wids['cbe_destino'], [(p.id, p.nombre + " " + p.cuenta) for p in pclases.CuentaDestino.select(orderBy = "nombre")])
def activar_widgets(self, s):
"""
Enables or disables (sensitive=True/False) all
window widgets that depend on the object
being shown.
Input: s should be True or False. In any case
it will be evaluated as a boolean.
"""
ws = tuple(["b_factura", "b_fecha", "b_nuevo_destino", "b_borrar", "e_factura"] + [self.dic_campos[k] for k in self.dic_campos.keys()])
for w in ws:
try:
self.wids[w].set_sensitive(s)
except:
print w
def refinar_resultados_busqueda(self, resultados):
"""
Shows all the records in "resultados" in a
results window.
Returns the id (first column of the results
window) of the selected row, or None if it
was cancelled.
"""
filas_res = []
for r in resultados:
if r.es_transferencia():
filas_res.append((r.id,
utils.str_fecha(r.fecha),
r.proveedor and r.proveedor.nombre or "-",
r.cuentaOrigen and r.cuentaOrigen.nombre or "-",
r.cuentaDestino and r.cuentaDestino.nombre or "-",
utils.float2str(r.importe)))
idcuenta = utils.dialogo_resultado(filas_res,
titulo = 'SELECCIONE TRANSFERENCIA',
cabeceras = ('ID', 'Fecha', 'Proveedor', 'Cuenta', 'Destino', "Importe"),
padre = self.wids['ventana'])
if idcuenta < 0:
return None
else:
return idcuenta
def rellenar_widgets(self):
"""
Puts the information of the current record
into the widgets.
It is not checked that it is != None, so
take care not to call this function in
that case.
"""
if self.objeto.facturaCompra and self.objeto.facturaCompra.proveedor != self.objeto.proveedor: # Having to resort to things like this means the design is not good.
self.objeto.facturaCompra = None
if self.objeto.proveedorID != None: # Only the accounts of the transfer's supplier are loaded into the combo.
utils.rellenar_lista(self.wids['cbe_destino'],
[(p.id, p.nombre + " " + p.cuenta) for p in pclases.CuentaDestino.select(pclases.CuentaDestino.q.proveedorID == self.objeto.proveedor.id, orderBy = "nombre")])
else:
utils.rellenar_lista(self.wids['cbe_destino'], [(p.id, p.nombre + " " + p.cuenta) for p in pclases.CuentaDestino.select(orderBy = "nombre")])
for nombre_col in self.dic_campos:
self.escribir_valor(self.objeto._SO_columnDict[nombre_col], getattr(self.objeto, nombre_col), self.dic_campos[nombre_col])
self.wids['e_factura'].set_text(self.objeto.concepto)
self.objeto.make_swap()
def buscar_factura(self):
"""
Search window for purchase invoices.
"""
res = None
a_buscar = utils.dialogo_entrada(titulo = "BUSCAR FACTURA",
texto = "Introduzca número de factura a buscar:",
padre = self.wids['ventana'])
if a_buscar != None:
facturas = pclases.FacturaCompra.select(pclases.FacturaCompra.q.numfactura.contains(a_buscar))
if facturas.count() >= 1:
id = utils.dialogo_resultado(filas = [(f.id, f.numfactura, f.proveedor and f.proveedor.nombre or "") for f in facturas],
titulo = "SELECCIONE FACTURA",
padre = self.wids['ventana'],
cabeceras = ("ID", "Número de factura", "Proveedor"))
if id > 0:
res = pclases.FacturaCompra.get(id)
else:
utils.dialogo_info(titulo = "SIN RESULTADOS",
texto = "La búsqueda del texto %s no produjo resultados." % (a_buscar),
padre = self.wids['ventana'])
return res
def nuevo(self, widget):
"""
Callback function for the b_nuevo button.
Asks for the basic data needed to create a new object.
Once inserted in the database it has to be made active
in the window so that the remaining fields not requested
here can be edited.
"""
anterior = self.objeto
if anterior != None:
anterior.notificador.desactivar()
factura = self.buscar_factura() # If a LOGIC is what is being paid, it should be done from the vencimientos_pendientes window.
if factura != None:
importe = factura.get_importe_primer_vencimiento_pendiente() # Returns 0 if none are left.
try:
nuevo = pclases.Pago(cuentaOrigen = pclases.CuentaOrigen.select(orderBy = "-id")[0],
fecha = mx.DateTime.localtime(),
facturaCompra = factura,
proveedor = factura.proveedor,
importe = importe)
except IndexError:
utils.dialogo_info(titulo = "ERROR CREANDO TRANSFERENCIA",
texto = "Se produjo un error al crear una nueva transferencia.\nProbablemente no existan cuentas en la aplicación desde donde realizar transferencias.",
padre = self.wids['ventana'])
else:
utils.dialogo_info('NUEVA TRANSFERENCIA CREADA',
'Se ha creado una transferencia nueva.\nA continuación complete la información de la misma y guarde los cambios.',
padre = self.wids['ventana'])
self.objeto = nuevo
self.objeto.notificador.activar(self.aviso_actualizacion)
self.activar_widgets(True)
self.actualizar_ventana(objeto_anterior = anterior)
def buscar(self, widget):
"""
Shows a search window followed by the results.
The selected object will become active in the
window unless Cancel is pressed in the results
window.
"""
transferencia = self.objeto
a_buscar = utils.dialogo_entrada(titulo = "BUSCAR TRANSFERENCIA",
texto = "Introduzca identificador, importe o pulse «Aceptar» para verlas todas:",
padre = self.wids['ventana'])
if a_buscar != None:
try:
ida_buscar = int(a_buscar)
except ValueError:
ida_buscar = -1
try:
a_buscar = float(a_buscar)
except ValueError:
if ida_buscar != -1:
criterio = pclases.Pago.q.id == ida_buscar
else:
criterio = None
else:
criterio = pclases.OR(pclases.Pago.q.importe == a_buscar,
pclases.Pago.q.id == ida_buscar)
resultados = pclases.Pago.select(criterio)
resultados = [r for r in resultados if r.es_transferencia()]
if len(resultados) > 1:
## Refine the results
idtransferencia = self.refinar_resultados_busqueda(resultados)
if idtransferencia == None:
return
resultados = [pclases.Pago.get(idtransferencia)]
elif len(resultados) < 1:
## No search results
utils.dialogo_info('SIN RESULTADOS', 'La búsqueda no produjo resultados.\nPruebe a cambiar el texto buscado o déjelo en blanco para ver una lista completa.\n(Atención: Ver la lista completa puede resultar lento si el número de elementos es muy alto)',
padre = self.wids['ventana'])
return
## A single result
# First disable the update callback
if transferencia != None:
transferencia.notificador.desactivar()
# Make the object the current one
try:
transferencia = resultados[0]
except IndexError:
utils.dialogo_info(titulo = "ERROR",
texto = "Se produjo un error al recuperar la información.\nCierre y vuelva a abrir la ventana antes de volver a intentarlo.",
padre = self.wids['texto'])
return
# And enable the notification callback:
transferencia.notificador.activar(self.aviso_actualizacion)
self.activar_widgets(True)
self.objeto = transferencia
self.actualizar_ventana()
def guardar(self, widget):
"""
Saves the contents of the entries and other data-input
widgets into the object and syncs it with the database.
"""
# Temporarily disable the notifier
self.objeto.notificador.activar(lambda: None)
# Update the object's data
for colname in self.dic_campos:
col = self.clase._SO_columnDict[colname]
try:
valor_ventana = self.leer_valor(col, self.dic_campos[colname])
setattr(self.objeto, colname, valor_ventana)
except (ValueError, mx.DateTime.RangeError, TypeError):
pass # TODO: Warn the user or something. The problem is that there is no "clean" way to get the value that failed.
# Force the database update instead of waiting for SQLObject to do it for me:
self.objeto.syncUpdate()
self.objeto.sync()
# Re-enable the notifier
self.objeto.notificador.activar(self.aviso_actualizacion)
self.actualizar_ventana()
self.wids['b_guardar'].set_sensitive(False)
def borrar(self, widget):
"""
Deletes the payment from the table but does NOT
try to delete any of its relations, so that if
any database constraint is violated, the deletion
will be cancelled and the user will be notified.
"""
transferencia = self.objeto
if not utils.dialogo('¿Eliminar la transferencia?', 'BORRAR', padre = self.wids['ventana']):
return
else:
transferencia.notificador.desactivar()
try:
transferencia.destroySelf()
except Exception, e:
self.logger.error("transferencias::borrar -> Pago ID %d no se pudo eliminar. Excepción: %s." % (transferencia.id, e))
utils.dialogo_info(titulo = "TRANSFERENCIA NO BORRADA",
texto = "La transferencia no se pudo eliminar.\n\nSe generó un informe de error en el «log» de la aplicación.",
padre = self.wids['ventana'])
self.actualizar_ventana()
return
self.objeto = None
self.ir_a_primero()
if __name__ == "__main__":
#p = Transferencias()
p = Transferencias(usuario = pclases.Usuario.selectBy(usuario = "admin")[0])
|
gpl-2.0
|
dchouzer/GamifyLyfe
|
mysite/settings.py
|
2
|
4217
|
"""
Django settings for mysite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJ_ROOT = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.pardir)
)
PROJ_NAME = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nfqig4u)lwq)p2_jbj!%t$-5&u0u7r7#q2o+v+^8_)6)7qg@h%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = PROJ_NAME + '.urls'
WSGI_APPLICATION = PROJ_NAME + '.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': PROJ_NAME, # Or path to database file if using sqlite3.
'USER': 'vagrant', # Not used with sqlite3.
'PASSWORD': 'dbpasswd', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJ_ROOT, 'templates'),
)
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJ_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJ_ROOT, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(
os.path.dirname(__file__),
'static',
),
)
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
)
# Load any default setting values that might be modified below:
# from django.conf.global_settings import ...
execfile(os.path.join(PROJ_ROOT, PROJ_NAME, 'settings_social_auth.py'))
execfile(os.path.join(PROJ_ROOT, PROJ_NAME, 'settings_core.py'))
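# Illustrative sketch (hypothetical local override, not used by this project):
# for development without PostgreSQL, the DATABASES block above could be replaced
# with a SQLite configuration, e.g.
#
#     DATABASES = {
#         'default': {
#             'ENGINE': 'django.db.backends.sqlite3',
#             'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#         }
#     }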
|
apache-2.0
|
gusDuarte/software-center-5.2
|
utils/piston-helpers/piston_generic_helper.py
|
4
|
8562
|
#!/usr/bin/python
# Copyright (C) 2011 Canonical
#
# Authors:
# Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import httplib2
import argparse
import logging
import os
import json
import pickle
import sys
from gi.repository import GObject
# useful for debugging
if "SOFTWARE_CENTER_DEBUG_HTTP" in os.environ:
httplib2.debuglevel = 1
import piston_mini_client.auth
import piston_mini_client.failhandlers
from piston_mini_client.failhandlers import APIError
try:
import softwarecenter
except ImportError:
if os.path.exists("../softwarecenter"):
sys.path.insert(0, "../")
else:
sys.path.insert(0, "/usr/share/software-center")
import softwarecenter.paths
from softwarecenter.paths import SOFTWARE_CENTER_CACHE_DIR
from softwarecenter.backend.login_sso import get_sso_backend
from softwarecenter.enums import (SOFTWARE_CENTER_NAME_KEYRING,
SOFTWARE_CENTER_SSO_DESCRIPTION,
)
from softwarecenter.utils import clear_token_from_ubuntu_sso_sync
# the piston import
from softwarecenter.backend.piston.ubuntusso_pristine import UbuntuSsoAPI
from softwarecenter.backend.piston.rnrclient import RatingsAndReviewsAPI
from softwarecenter.backend.piston.scaclient import SoftwareCenterAgentAPI
from softwarecenter.backend.piston.sreclient_pristine import (
SoftwareCenterRecommenderAPI)
# patch default_service_root to the one we use
from softwarecenter.enums import UBUNTU_SSO_SERVICE
# *Don't* append /api/1.0, as it's already included in UBUNTU_SSO_SERVICE
UbuntuSsoAPI.default_service_root = UBUNTU_SSO_SERVICE
from softwarecenter.enums import RECOMMENDER_HOST
SoftwareCenterRecommenderAPI.default_service_root = \
RECOMMENDER_HOST + "/api/1.0"
RatingsAndReviewsAPI # pyflakes
UbuntuSsoAPI # pyflakes
SoftwareCenterAgentAPI # pyflakes
SoftwareCenterRecommenderAPI # pyflakes
from gettext import gettext as _
# helper that is only used to verify that the token is ok
# and trigger cleanup if not
class SSOLoginHelper(object):
def __init__(self, xid=0):
self.oauth = None
self.xid = xid
self.loop = GObject.MainLoop(GObject.main_context_default())
def _login_successful(self, sso_backend, oauth_result):
LOG.debug("_login_successful")
self.oauth = oauth_result
# FIXME: actually verify the token against ubuntu SSO
self.loop.quit()
def verify_token_sync(self, token):
""" Verify that the token is valid
Note that this may raise httplib2 exceptions if the server
is not reachable
"""
LOG.debug("verify_token")
auth = piston_mini_client.auth.OAuthAuthorizer(
token["token"], token["token_secret"],
token["consumer_key"], token["consumer_secret"])
api = UbuntuSsoAPI(auth=auth)
try:
res = api.whoami()
except piston_mini_client.failhandlers.APIError as e:
LOG.exception("api.whoami failed with APIError: '%s'" % e)
return False
return len(res) > 0
def clear_token(self):
clear_token_from_ubuntu_sso_sync(SOFTWARE_CENTER_NAME_KEYRING)
def get_oauth_token_sync(self):
self.oauth = None
sso = get_sso_backend(
self.xid,
SOFTWARE_CENTER_NAME_KEYRING,
_(SOFTWARE_CENTER_SSO_DESCRIPTION))
sso.connect("login-successful", self._login_successful)
sso.connect("login-failed", lambda s: self.loop.quit())
sso.connect("login-canceled", lambda s: self.loop.quit())
sso.login_or_register()
self.loop.run()
return self.oauth
def get_oauth_token_and_verify_sync(self):
token = self.get_oauth_token_sync()
# check if the token is valid and reset it if it is not
if token:
# verify token will return false if there is a API error,
# but there maybe httplib2 errors if there is no network,
# so ignore them
try:
if not self.verify_token_sync(token):
self.clear_token()
# re-trigger login once
token = self.get_oauth_token_sync()
except Exception as e:
LOG.warn(
"token could not be verified (network problem?): %s" % e)
return token
LOG = logging.getLogger(__name__)
if __name__ == "__main__":
logging.basicConfig()
# command line parser
parser = argparse.ArgumentParser(
description="Backend helper for piston-mini-client based APIs")
parser.add_argument("--debug", action="store_true", default=False,
help="enable debug output")
parser.add_argument("--datadir", default="/usr/share/software-center",
help="setup alternative datadir")
parser.add_argument("--ignore-cache", action="store_true", default=False,
help="force ignore cache")
parser.add_argument("--needs-auth", default=False, action="store_true",
help="need oauth credentials")
parser.add_argument("--output", default="pickle",
help="output result as [pickle|json|text]")
parser.add_argument("--parent-xid", default=0,
help="xid of the parent window")
parser.add_argument('klass', help='class to use')
parser.add_argument('function', help='function to call')
parser.add_argument('kwargs', nargs="?",
help='kwargs for the function call as json')
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
LOG.setLevel(logging.DEBUG)
if args.ignore_cache:
cachedir = None
else:
cachedir = os.path.join(SOFTWARE_CENTER_CACHE_DIR, "piston-helper")
# check what we need to call
klass = globals()[args.klass]
func = args.function
kwargs = json.loads(args.kwargs or '{}')
softwarecenter.paths.datadir = args.datadir
if args.needs_auth:
helper = SSOLoginHelper(args.parent_xid)
token = helper.get_oauth_token_and_verify_sync()
# if we don't have a token, error here
if not token:
# it may happen that the parent is closed already so the pipe
# is gone, that is ok as we exit anyway
try:
sys.stderr.write("ERROR: can not obtain a oauth token\n")
except IOError:
pass
sys.exit(1)
auth = piston_mini_client.auth.OAuthAuthorizer(
token["token"], token["token_secret"],
token["consumer_key"], token["consumer_secret"])
api = klass(cachedir=cachedir, auth=auth)
else:
api = klass(cachedir=cachedir)
piston_reply = None
# handle the args
f = getattr(api, func)
try:
piston_reply = f(**kwargs)
except httplib2.ServerNotFoundError as e:
LOG.warn(e)
sys.exit(1)
except APIError as e:
LOG.warn(e)
sys.exit(1)
except:
LOG.exception("urclient_apps")
sys.exit(1)
# print to stdout where its consumed by the parent
if piston_reply is None:
LOG.warn("no data")
sys.exit(0)
if args.debug:
for itm in piston_reply:
s = "** itm: %s\n" % itm
for var in vars(itm):
s += "%s: '%s'\n" % (var, getattr(itm, var))
LOG.debug(s)
# check what format to use
if args.output == "pickle":
res = pickle.dumps(piston_reply)
elif args.output == "json":
res = json.dumps(piston_reply)
elif args.output == "text":
res = piston_reply
# and output it
try:
print res
except IOError:
# this can happen if the parent gets killed, no need to trigger
# apport for this
pass
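# Illustrative sketch: this helper is normally spawned by software-center itself,
# but it can be exercised by hand with a class name, a method name and optional
# JSON kwargs, e.g. (assuming the ratings API exposes a server_status() call):
#
#     python piston_generic_helper.py --output json \
#         RatingsAndReviewsAPI server_status '{}'
#
# The reply is printed to stdout in the requested format (pickle by default),
# which is what the parent process reads back over the pipe.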
|
lgpl-3.0
|
NexusIS/tempest
|
tempest/common/credentials.py
|
25
|
3882
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from tempest.common import accounts
from tempest.common import cred_provider
from tempest.common import isolated_creds
from tempest import config
from tempest import exceptions
CONF = config.CONF
# Return the right implementation of CredentialProvider based on config
# Dropping interface and password, as they are never used anyways
# TODO(andreaf) Drop them from the CredentialsProvider interface completely
def get_isolated_credentials(name, network_resources=None,
force_tenant_isolation=False,
identity_version=None):
# If a test requires a new account to work, it can have it via forcing
# tenant isolation. A new account will be produced only for that test.
# In case admin credentials are not available for the account creation,
# the test should be skipped else it would fail.
if CONF.auth.allow_tenant_isolation or force_tenant_isolation:
return isolated_creds.IsolatedCreds(
name=name,
network_resources=network_resources,
identity_version=identity_version)
else:
if (CONF.auth.test_accounts_file and
os.path.isfile(CONF.auth.test_accounts_file)):
# Most params are not relevant for pre-created accounts
return accounts.Accounts(name=name,
identity_version=identity_version)
else:
return accounts.NotLockingAccounts(
name=name, identity_version=identity_version)
# We want a helper function here to check and see if admin credentials
# are available so we can do a single call from skip_checks if admin
# creds are available.
def is_admin_available():
is_admin = True
# If tenant isolation is enabled admin will be available
if CONF.auth.allow_tenant_isolation:
return is_admin
# Check whether test accounts file has the admin specified or not
elif (CONF.auth.test_accounts_file and
os.path.isfile(CONF.auth.test_accounts_file)):
check_accounts = accounts.Accounts(name='check_admin')
if not check_accounts.admin_available():
is_admin = False
else:
try:
cred_provider.get_configured_credentials('identity_admin',
fill_in=False)
except exceptions.InvalidConfiguration:
is_admin = False
return is_admin
# We want a helper function here to check and see if alt credentials
# are available so we can do a single call from skip_checks if alt
# creds are available.
def is_alt_available():
# If tenant isolation is enabled, alternate credentials will be available
if CONF.auth.allow_tenant_isolation:
return True
# Check whether a pre-created test accounts file is configured
if (CONF.auth.test_accounts_file and
os.path.isfile(CONF.auth.test_accounts_file)):
check_accounts = accounts.Accounts(name='check_alt')
else:
check_accounts = accounts.NotLockingAccounts(name='check_alt')
try:
if not check_accounts.is_multi_user():
return False
else:
return True
except exceptions.InvalidConfiguration:
return False
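# Illustrative sketch: typical use from a test class is to skip early when the
# needed accounts are missing and to pick a credentials provider once (class and
# method names below are illustrative only):
#
#     class MyScenarioTest(base.BaseTestCase):
#
#         @classmethod
#         def skip_checks(cls):
#             super(MyScenarioTest, cls).skip_checks()
#             if not credentials.is_admin_available():
#                 raise cls.skipException("admin credentials are required")
#
#         @classmethod
#         def setup_credentials(cls):
#             cls.cred_provider = credentials.get_isolated_credentials(
#                 cls.__name__, network_resources=None)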
|
apache-2.0
|
nagyistoce/edx-platform
|
cms/djangoapps/contentstore/views/component.py
|
57
|
17109
|
from __future__ import absolute_import
import json
import logging
from django.http import HttpResponseBadRequest, Http404
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_GET
from django.core.exceptions import PermissionDenied
from django.conf import settings
from opaque_keys import InvalidKeyError
from xmodule.modulestore.exceptions import ItemNotFoundError
from edxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from xblock.core import XBlock
from xblock.django.request import webob_to_django_response, django_to_webob_request
from xblock.exceptions import NoSuchHandlerError
from xblock.fields import Scope
from xblock.plugin import PluginMissingError
from xblock.runtime import Mixologist
from contentstore.utils import get_lms_link_for_item
from contentstore.views.helpers import get_parent_xblock, is_unit, xblock_type_display_name
from contentstore.views.item import create_xblock_info, add_container_page_publishing_info
from opaque_keys.edx.keys import UsageKey
from student.auth import has_course_author_access
from django.utils.translation import ugettext as _
from models.settings.course_grading import CourseGradingModel
__all__ = ['OPEN_ENDED_COMPONENT_TYPES',
'ADVANCED_COMPONENT_POLICY_KEY',
'container_handler',
'component_handler'
]
log = logging.getLogger(__name__)
# NOTE: it is assumed that this list is disjoint from ADVANCED_COMPONENT_TYPES
COMPONENT_TYPES = ['discussion', 'html', 'problem', 'video']
# Constants for determining if these components should be enabled for this course
SPLIT_TEST_COMPONENT_TYPE = 'split_test'
OPEN_ENDED_COMPONENT_TYPES = ["combinedopenended", "peergrading"]
NOTE_COMPONENT_TYPES = ['notes']
if settings.FEATURES.get('ALLOW_ALL_ADVANCED_COMPONENTS'):
ADVANCED_COMPONENT_TYPES = sorted(set(name for name, class_ in XBlock.load_classes()) - set(COMPONENT_TYPES))
else:
ADVANCED_COMPONENT_TYPES = settings.ADVANCED_COMPONENT_TYPES
ADVANCED_COMPONENT_CATEGORY = 'advanced'
ADVANCED_COMPONENT_POLICY_KEY = 'advanced_modules'
ADVANCED_PROBLEM_TYPES = settings.ADVANCED_PROBLEM_TYPES
CONTAINER_TEMPLATES = [
"basic-modal", "modal-button", "edit-xblock-modal",
"editor-mode-button", "upload-dialog",
"add-xblock-component", "add-xblock-component-button", "add-xblock-component-menu",
"add-xblock-component-menu-problem", "xblock-string-field-editor", "publish-xblock", "publish-history",
"unit-outline", "container-message", "license-selector",
]
def _advanced_component_types():
"""
Return advanced component types which can be created.
"""
return [c_type for c_type in ADVANCED_COMPONENT_TYPES if c_type not in settings.DEPRECATED_ADVANCED_COMPONENT_TYPES]
@require_GET
@login_required
def subsection_handler(request, usage_key_string):
"""
The restful handler for subsection-specific requests.
GET
html: return html page for editing a subsection
json: not currently supported
"""
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
usage_key = UsageKey.from_string(usage_key_string)
try:
course, item, lms_link, preview_link = _get_item_in_course(request, usage_key)
except ItemNotFoundError:
return HttpResponseBadRequest()
# make sure that location references a 'sequential', otherwise return
# BadRequest
if item.location.category != 'sequential':
return HttpResponseBadRequest()
parent = get_parent_xblock(item)
# remove all metadata from the generic dictionary that is presented in a
        # more normalized UI. We only want to display the XBlock's fields, not
# the fields from any mixins that have been added
fields = getattr(item, 'unmixed_class', item.__class__).fields
policy_metadata = dict(
(field.name, field.read_from(item))
for field
in fields.values()
if field.name not in ['display_name', 'start', 'due', 'format'] and field.scope == Scope.settings
)
can_view_live = False
subsection_units = item.get_children()
can_view_live = any([modulestore().has_published_version(unit) for unit in subsection_units])
return render_to_response(
'edit_subsection.html',
{
'subsection': item,
'context_course': course,
'new_unit_category': 'vertical',
'lms_link': lms_link,
'preview_link': preview_link,
'course_graders': json.dumps(CourseGradingModel.fetch(item.location.course_key).graders),
'parent_item': parent,
'locator': item.location,
'policy_metadata': policy_metadata,
'subsection_units': subsection_units,
'can_view_live': can_view_live
}
)
else:
return HttpResponseBadRequest("Only supports html requests")
def _load_mixed_class(category):
"""
Load an XBlock by category name, and apply all defined mixins
"""
component_class = XBlock.load_class(category, select=settings.XBLOCK_SELECT_FUNCTION)
mixologist = Mixologist(settings.XBLOCK_MIXINS)
return mixologist.mix(component_class)
# pylint: disable=unused-argument
@require_GET
@login_required
def container_handler(request, usage_key_string):
"""
The restful handler for container xblock requests.
GET
html: returns the HTML page for editing a container
json: not currently supported
"""
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
try:
usage_key = UsageKey.from_string(usage_key_string)
except InvalidKeyError: # Raise Http404 on invalid 'usage_key_string'
raise Http404
with modulestore().bulk_operations(usage_key.course_key):
try:
course, xblock, lms_link, preview_lms_link = _get_item_in_course(request, usage_key)
except ItemNotFoundError:
return HttpResponseBadRequest()
component_templates = get_component_templates(course)
ancestor_xblocks = []
parent = get_parent_xblock(xblock)
action = request.REQUEST.get('action', 'view')
is_unit_page = is_unit(xblock)
unit = xblock if is_unit_page else None
while parent and parent.category != 'course':
if unit is None and is_unit(parent):
unit = parent
ancestor_xblocks.append(parent)
parent = get_parent_xblock(parent)
ancestor_xblocks.reverse()
assert unit is not None, "Could not determine unit page"
subsection = get_parent_xblock(unit)
assert subsection is not None, "Could not determine parent subsection from unit " + unicode(unit.location)
section = get_parent_xblock(subsection)
assert section is not None, "Could not determine ancestor section from unit " + unicode(unit.location)
# Fetch the XBlock info for use by the container page. Note that it includes information
# about the block's ancestors and siblings for use by the Unit Outline.
xblock_info = create_xblock_info(xblock, include_ancestor_info=is_unit_page)
if is_unit_page:
add_container_page_publishing_info(xblock, xblock_info)
            # Determine where this item sits in its parent's list of children,
            # since the preview will need this index.
index = 1
for child in subsection.get_children():
if child.location == unit.location:
break
index += 1
return render_to_response('container.html', {
'context_course': course, # Needed only for display of menus at top of page.
'action': action,
'xblock': xblock,
'xblock_locator': xblock.location,
'unit': unit,
'is_unit_page': is_unit_page,
'subsection': subsection,
'section': section,
'new_unit_category': 'vertical',
'ancestor_xblocks': ancestor_xblocks,
'component_templates': json.dumps(component_templates),
'xblock_info': xblock_info,
'draft_preview_link': preview_lms_link,
'published_preview_link': lms_link,
'templates': CONTAINER_TEMPLATES
})
else:
return HttpResponseBadRequest("Only supports HTML requests")
def get_component_templates(courselike, library=False):
"""
Returns the applicable component templates that can be used by the specified course or library.
"""
def create_template_dict(name, cat, boilerplate_name=None, tab="common", hinted=False):
"""
Creates a component template dict.
Parameters
display_name: the user-visible name of the component
category: the type of component (problem, html, etc.)
boilerplate_name: name of boilerplate for filling in default values. May be None.
hinted: True if hinted problem else False
tab: common(default)/advanced, which tab it goes in
"""
return {
"display_name": name,
"category": cat,
"boilerplate_name": boilerplate_name,
"hinted": hinted,
"tab": tab
}
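    # Illustration (editor's note): with the defaults above, a call such as
    # create_template_dict(_("Blank"), "problem") would yield, in an English
    # locale:
    #   {"display_name": "Blank", "category": "problem",
    #    "boilerplate_name": None, "hinted": False, "tab": "common"}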
component_display_names = {
'discussion': _("Discussion"),
'html': _("HTML"),
'problem': _("Problem"),
'video': _("Video")
}
component_templates = []
categories = set()
# The component_templates array is in the order of "advanced" (if present), followed
# by the components in the order listed in COMPONENT_TYPES.
component_types = COMPONENT_TYPES[:]
# Libraries do not support discussions
if library:
component_types = [component for component in component_types if component != 'discussion']
for category in component_types:
templates_for_category = []
component_class = _load_mixed_class(category)
# add the default template with localized display name
# TODO: Once mixins are defined per-application, rather than per-runtime,
# this should use a cms mixed-in class. (cpennington)
display_name = xblock_type_display_name(category, _('Blank')) # this is the Blank Advanced problem
templates_for_category.append(create_template_dict(display_name, category, None, 'advanced'))
categories.add(category)
# add boilerplates
if hasattr(component_class, 'templates'):
for template in component_class.templates():
filter_templates = getattr(component_class, 'filter_templates', None)
if not filter_templates or filter_templates(template, courselike):
                    # Tab can be 'common' or 'advanced'.
                    # Defaults to 'common' when the boilerplate supplies markdown, 'advanced' otherwise.
tab = 'common'
if template['metadata'].get('markdown') is None:
tab = 'advanced'
hinted = template.get('hinted', False)
templates_for_category.append(
create_template_dict(
_(template['metadata'].get('display_name')), # pylint: disable=translation-of-non-string
category,
template.get('template_id'),
tab,
hinted,
)
)
# Add any advanced problem types
if category == 'problem':
for advanced_problem_type in ADVANCED_PROBLEM_TYPES:
component = advanced_problem_type['component']
boilerplate_name = advanced_problem_type['boilerplate_name']
try:
component_display_name = xblock_type_display_name(component)
except PluginMissingError:
log.warning('Unable to load xblock type %s to read display_name', component, exc_info=True)
else:
templates_for_category.append(
create_template_dict(component_display_name, component, boilerplate_name, 'advanced')
)
categories.add(component)
component_templates.append({
"type": category,
"templates": templates_for_category,
"display_name": component_display_names[category]
})
# Libraries do not support advanced components at this time.
if library:
return component_templates
# Check if there are any advanced modules specified in the course policy.
# These modules should be specified as a list of strings, where the strings
# are the names of the modules in ADVANCED_COMPONENT_TYPES that should be
# enabled for the course.
course_advanced_keys = courselike.advanced_modules
advanced_component_templates = {"type": "advanced", "templates": [], "display_name": _("Advanced")}
advanced_component_types = _advanced_component_types()
# Set component types according to course policy file
if isinstance(course_advanced_keys, list):
for category in course_advanced_keys:
if category in advanced_component_types and category not in categories:
# boilerplates not supported for advanced components
try:
component_display_name = xblock_type_display_name(category, default_display_name=category)
advanced_component_templates['templates'].append(
create_template_dict(
component_display_name,
category
)
)
categories.add(category)
except PluginMissingError:
# dhm: I got this once but it can happen any time the
# course author configures an advanced component which does
# not exist on the server. This code here merely
# prevents any authors from trying to instantiate the
# non-existent component type by not showing it in the menu
log.warning(
"Advanced component %s does not exist. It will not be added to the Studio new component menu.",
category
)
else:
log.error(
"Improper format for course advanced keys! %s",
course_advanced_keys
)
if len(advanced_component_templates['templates']) > 0:
component_templates.insert(0, advanced_component_templates)
return component_templates
@login_required
def _get_item_in_course(request, usage_key):
"""
Helper method for getting the old location, containing course,
item, lms_link, and preview_lms_link for a given locator.
Verifies that the caller has permission to access this item.
"""
# usage_key's course_key may have an empty run property
usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
course_key = usage_key.course_key
if not has_course_author_access(request.user, course_key):
raise PermissionDenied()
course = modulestore().get_course(course_key)
item = modulestore().get_item(usage_key, depth=1)
lms_link = get_lms_link_for_item(item.location)
preview_lms_link = get_lms_link_for_item(item.location, preview=True)
return course, item, lms_link, preview_lms_link
@login_required
def component_handler(request, usage_key_string, handler, suffix=''):
"""
Dispatch an AJAX action to an xblock
Args:
    usage_key_string: The usage-id of the block to dispatch to
handler (str): The handler to execute
suffix (str): The remainder of the url to be passed to the handler
Returns:
:class:`django.http.HttpResponse`: The response from the handler, converted to a
django response
"""
usage_key = UsageKey.from_string(usage_key_string)
descriptor = modulestore().get_item(usage_key)
# Let the module handle the AJAX
req = django_to_webob_request(request)
try:
resp = descriptor.handle(handler, req, suffix)
except NoSuchHandlerError:
log.info("XBlock %s attempted to access missing handler %r", descriptor, handler, exc_info=True)
raise Http404
# unintentional update to handle any side effects of handle call
# could potentially be updating actual course data or simply caching its values
modulestore().update_item(descriptor, request.user.id)
return webob_to_django_response(resp)
|
agpl-3.0
|
koyuawsmbrtn/eclock
|
windows/Python27/Lib/logging/__init__.py
|
9
|
60284
|
# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, cStringIO, traceback, warnings, weakref
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning']
try:
import codecs
except ImportError:
codecs = None
try:
import thread
import threading
except ImportError:
thread = None
__author__ = "Vinay Sajip <[email protected]>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
try:
unicode
_unicode = True
except NameError:
_unicode = False
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
_srcfile = __file__[:-4] + '.py'
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
# next bit filched from 1.5.2's inspect.py
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
# done filching
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = 1
#
# If you don't want threading information in the log, set this to zero
#
logThreads = 1
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = 1
#
# If you don't want process information in the log, set this to zero
#
logProcesses = 1
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
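# For illustration (editor's note), given the table above:
#   getLevelName(INFO)    -> 'INFO'
#   getLevelName(15)      -> 'Level 15'
#   getLevelName('WARN')  -> 30 (the mapping is populated in both directions)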
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
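# Example (editor's note): registering a custom level so it formats by name:
#   NOTICE = 25
#   addLevelName(NOTICE, 'NOTICE')
#   getLevelName(NOTICE)   # -> 'NOTICE'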
def _checkLevel(level):
if isinstance(level, (int, long)):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if thread:
_lock = threading.RLock()
else:
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warn('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - long(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and thread:
self.thread = thread.get_ident()
self.threadName = threading.current_thread().name
else:
self.thread = None
self.threadName = None
if not logMultiprocessing:
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except StandardError:
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
if not _unicode: #if no unicode support...
msg = str(self.msg)
else:
msg = self.msg
if not isinstance(msg, basestring):
try:
msg = str(self.msg)
except UnicodeError:
msg = self.msg #Defer encoding till later
if self.args:
msg = msg % self.args
return msg
def makeLogRecord(dict):
"""
    Make a LogRecord whose attributes are defined by the specified dictionary.
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = LogRecord(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
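# Example (editor's sketch): rebuilding a record from an attribute dict, such
# as one unpickled from a socket stream:
#   d = {'name': 'myapp', 'msg': 'hello %s', 'args': ('world',),
#        'levelno': INFO, 'levelname': 'INFO', 'pathname': '', 'lineno': 0}
#   rec = makeLogRecord(d)
#   rec.getMessage()   # -> 'hello world'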
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
    default value of "%(message)s" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
"""
if fmt:
self._fmt = fmt
else:
self._fmt = "%(message)s"
self.datefmt = datefmt
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime("%Y-%m-%d %H:%M:%S", ct)
s = "%s,%03d" % (t, record.msecs)
return s
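    # Note (editor's addition): as described above, to render all timestamps
    # in UTC, point the converter at time.gmtime, either per instance
    # (fmt.converter = time.gmtime) or for every formatter
    # (Formatter.converter = time.gmtime).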
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = cStringIO.StringIO()
traceback.print_exception(ei[0], ei[1], ei[2], None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._fmt.find("%(asctime)") >= 0
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime(), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self._fmt % record.__dict__
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
try:
s = s + record.exc_text
except UnicodeError:
# Sometimes filenames have non-ASCII chars, which can lead
# to errors when s is Unicode and record.exc_text is str
# See issue 8924.
# We also use replace for when there are multiple
# encodings, e.g. UTF-8 for the filesystem and latin-1
# for a script. See issue 13232.
s = s + record.exc_text.decode(sys.getfilesystemencoding(),
'replace')
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return 1
elif self.name == record.name:
return 1
elif record.name.find(self.name, 0, self.nlen) != 0:
return 0
return (record.name[self.nlen] == ".")
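# Example (editor's note): per the semantics documented above, attaching
# Filter('A.B') to a hypothetical handler via handler.addFilter(Filter('A.B'))
# lets records from loggers 'A.B' and 'A.B.C' through while dropping 'A.BB'.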
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
"""
rv = 1
for f in self.filters:
if not f.filter(record):
rv = 0
break
return rv
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if thread:
self.lock = threading.RLock()
else:
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError:
pass # see issue 5971
finally:
del ei
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
fs = "%s\n"
if not _unicode: #if no unicode support...
stream.write(fs % msg)
else:
try:
if (isinstance(msg, unicode) and
getattr(stream, 'encoding', None)):
ufs = u'%s\n'
try:
stream.write(ufs % msg)
except UnicodeEncodeError:
#Printing to terminals sometimes fails. For example,
#with an encoding of 'cp1251', the above write will
#work if written to a stream opened or wrapped by
#the codecs module, but fail when writing to a
#terminal even when the codepage is set to cp1251.
#An extra encoding step seems to be needed.
stream.write((ufs % msg).encode(stream.encoding))
else:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode("UTF-8"))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=0):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
if codecs is None:
encoding = None
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
if self.encoding is None:
stream = open(self.baseFilename, self.mode)
else:
stream = codecs.open(self.baseFilename, self.mode, self.encoding)
return stream
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
#self.loggers = [alogger]
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
#if alogger not in self.loggers:
if alogger not in self.loggerMap:
#self.loggers.append(alogger)
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = 0
self.loggerDict = {}
self.loggerClass = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, basestring):
raise TypeError('A logger name must be string or Unicode')
if isinstance(name, unicode):
name = name.encode('utf-8')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(name)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = 1
self.handlers = []
self.disabled = 0
def setLevel(self, level):
"""
Set the logging level of this logger.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
warn = warning
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = 1
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
rv = (co.co_filename, f.f_lineno, co.co_name)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func = self.findCaller()
except ValueError:
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else:
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = 1
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return 0
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
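    # Example (editor's sketch): a subclass can override process() to inject
    # context into the message itself, e.g. assuming a 'connid' key in extra:
    #   class ConnAdapter(LoggerAdapter):
    #       def process(self, msg, kwargs):
    #           kwargs['extra'] = self.extra
    #           return '[conn %s] %s' % (self.extra['connid'], msg), kwargs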
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.debug(msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.info(msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.error(msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
kwargs["exc_info"] = 1
self.logger.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.critical(msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)
def isEnabledFor(self, level):
"""
See if the underlying logger is enabled for the specified level.
"""
return self.logger.isEnabledFor(level)
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
hdlr = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
hdlr = StreamHandler(stream)
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
fmt = Formatter(fs, dfs)
hdlr.setFormatter(fmt)
root.addHandler(hdlr)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
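# Illustrative sketch (not part of the original module): a one-shot script
# would typically call basicConfig() once, before any logging call; the file
# name, mode, format and level below are assumed example values.
#
#   import logging
#   logging.basicConfig(filename="app.log", filemode="w",
#                       format="%(asctime)s %(levelname)s %(message)s",
#                       level=logging.DEBUG)
#   logging.info("configured")   # written to app.log instead of stderr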
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
#def getRootLogger():
# """
# Return the root logger.
#
# Note that getLogger('') now does the same thing, so this function is
# deprecated and may disappear in the future.
# """
# return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger,
with exception information.
"""
kwargs['exc_info'] = 1
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
warn = warning
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
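# Illustrative sketch (not part of the original module): the module-level
# helpers above delegate to the root logger and call basicConfig() on first
# use if no handler is configured; the messages are example values only.
#
#   import logging
#   logging.warning("disk at %d%% capacity", 91)   # implicitly configures root
#   logging.disable(logging.INFO)                  # silence INFO and below
#   logging.info("this message is now suppressed")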
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
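# Illustrative sketch (not part of the original module): a library that logs
# but must not force configuration on its users could attach a NullHandler to
# its top-level logger; "mylib" is an assumed package name.
#
#   import logging
#   logging.getLogger("mylib").addHandler(NullHandler())
#   # users of mylib who never configure logging see no "No handlers" warning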
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
Implementation of showwarning which redirects to logging, which will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
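# Illustrative sketch (not part of the original module): routing warnings
# through logging so that they share the configured handlers and format.
#
#   import logging, warnings
#   logging.basicConfig(level=logging.WARNING)
#   logging.captureWarnings(True)
#   warnings.warn("deprecated API")   # emitted via the "py.warnings" logger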
|
gpl-2.0
|
Eseoghene/bite-project
|
deps/gdata-python-client/samples/apps/marketplace_sample/gdata/service.py
|
78
|
69609
|
#!/usr/bin/python
#
# Copyright (C) 2006,2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GDataService provides CRUD ops. and programmatic login for GData services.
Error: A base exception class for all exceptions in the gdata_client
module.
CaptchaRequired: This exception is thrown when a login attempt results in a
captcha challenge from the ClientLogin service. When this
exception is thrown, the captcha_token and captcha_url are
set to the values provided in the server's response.
BadAuthentication: Raised when a login attempt is made with an incorrect
username or password.
NotAuthenticated: Raised if an operation requiring authentication is called
before a user has authenticated.
NonAuthSubToken: Raised if a method to modify an AuthSub token is used when
the user is either not authenticated or is authenticated
through another authentication mechanism.
NonOAuthToken: Raised if a method to modify an OAuth token is used when the
user is either not authenticated or is authenticated through
another authentication mechanism.
RequestError: Raised if a CRUD request returned a non-success code.
UnexpectedReturnType: Raised if the response from the server was not of the
desired type. For example, this would be raised if the
server sent a feed when the client requested an entry.
GDataService: Encapsulates user credentials needed to perform insert, update
and delete operations with the GData API. An instance can
perform user authentication, query, insertion, deletion, and
update.
Query: Eases query URI creation by allowing URI parameters to be set as
dictionary attributes. For example a query with a feed of
'/base/feeds/snippets' and ['bq'] set to 'digital camera' will
produce '/base/feeds/snippets?bq=digital+camera' when .ToUri() is
called on it.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import re
import urllib
import urlparse
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom.service
import gdata
import atom
import atom.http_interface
import atom.token_store
import gdata.auth
import gdata.gauth
AUTH_SERVER_HOST = 'https://www.google.com'
# When requesting an AuthSub token, it is often helpful to track the scope
# which is being requested. One way to accomplish this is to add a URL
# parameter to the 'next' URL which contains the requested scope. This
# constant is the default name (AKA key) for the URL parameter.
SCOPE_URL_PARAM_NAME = 'authsub_token_scope'
# When requesting an OAuth access token or authorization of an existing OAuth
# request token, it is often helpful to track the scope(s) which is/are being
# requested. One way to accomplish this is to add a URL parameter to the
# 'callback' URL which contains the requested scope. This constant is the
# default name (AKA key) for the URL parameter.
OAUTH_SCOPE_URL_PARAM_NAME = 'oauth_token_scope'
# Maps the service names used in ClientLogin to scope URLs.
CLIENT_LOGIN_SCOPES = gdata.gauth.AUTH_SCOPES
# Default parameters for GDataService.GetWithRetries method
DEFAULT_NUM_RETRIES = 3
DEFAULT_DELAY = 1
DEFAULT_BACKOFF = 2
def lookup_scopes(service_name):
"""Finds the scope URLs for the desired service.
In some cases, an unknown service may be used, and in those cases this
function will return None.
"""
if service_name in CLIENT_LOGIN_SCOPES:
return CLIENT_LOGIN_SCOPES[service_name]
return None
# Module level variable specifies which module should be used by GDataService
# objects to make HttpRequests. This setting can be overridden on each
# instance of GDataService.
# This module level variable is deprecated. Reassign the http_client member
# of a GDataService object instead.
http_request_handler = atom.service
class Error(Exception):
pass
class CaptchaRequired(Error):
pass
class BadAuthentication(Error):
pass
class NotAuthenticated(Error):
pass
class NonAuthSubToken(Error):
pass
class NonOAuthToken(Error):
pass
class RequestError(Error):
pass
class UnexpectedReturnType(Error):
pass
class BadAuthenticationServiceURL(Error):
pass
class FetchingOAuthRequestTokenFailed(RequestError):
pass
class TokenUpgradeFailed(RequestError):
pass
class RevokingOAuthTokenFailed(RequestError):
pass
class AuthorizationRequired(Error):
pass
class TokenHadNoScope(Error):
pass
class RanOutOfTries(Error):
pass
class GDataService(atom.service.AtomService):
"""Contains elements needed for GData login and CRUD request headers.
Maintains additional headers (tokens for example) needed for the GData
services to allow a user to perform inserts, updates, and deletes.
"""
# The handler member is deprecated, use http_client instead.
handler = None
# The auth_token member is deprecated, use the token_store instead.
auth_token = None
# The tokens dict is deprecated in favor of the token_store.
tokens = None
def __init__(self, email=None, password=None, account_type='HOSTED_OR_GOOGLE',
service=None, auth_service_url=None, source=None, server=None,
additional_headers=None, handler=None, tokens=None,
http_client=None, token_store=None):
"""Creates an object of type GDataService.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
account_type: string (optional) The type of account to use. Use
'GOOGLE' for regular Google accounts or 'HOSTED' for Google
Apps accounts, or 'HOSTED_OR_GOOGLE' to try finding a HOSTED
account first and, if it doesn't exist, try finding a regular
GOOGLE account. Default value: 'HOSTED_OR_GOOGLE'.
service: string (optional) The desired service for which credentials
will be obtained.
auth_service_url: string (optional) User-defined auth token request URL
allows users to explicitly specify where to send auth token requests.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'base.google.com'.
additional_headers: dictionary (optional) Any additional headers which
should be included with CRUD operations.
handler: module (optional) This parameter is deprecated and has been
replaced by http_client.
tokens: This parameter is deprecated, calls should be made to
token_store instead.
http_client: An object responsible for making HTTP requests using a
request method. If none is provided, a new instance of
atom.http.ProxiedHttpClient will be used.
token_store: Keeps a collection of authorization tokens which can be
applied to requests for specific URLs. Critical methods are
find_token based on a URL (atom.url.Url or a string), add_token,
and remove_token.
"""
atom.service.AtomService.__init__(self, http_client=http_client,
token_store=token_store)
self.email = email
self.password = password
self.account_type = account_type
self.service = service
self.auth_service_url = auth_service_url
self.server = server
self.additional_headers = additional_headers or {}
self._oauth_input_params = None
self.__SetSource(source)
self.__captcha_token = None
self.__captcha_url = None
self.__gsessionid = None
if http_request_handler.__name__ == 'gdata.urlfetch':
import gdata.alt.appengine
self.http_client = gdata.alt.appengine.AppEngineHttpClient()
def _SetSessionId(self, session_id):
"""Used in unit tests to simulate a 302 which sets a gsessionid."""
self.__gsessionid = session_id
# Define properties for GDataService
def _SetAuthSubToken(self, auth_token, scopes=None):
"""Deprecated, use SetAuthSubToken instead."""
self.SetAuthSubToken(auth_token, scopes=scopes)
def __SetAuthSubToken(self, auth_token, scopes=None):
"""Deprecated, use SetAuthSubToken instead."""
self._SetAuthSubToken(auth_token, scopes=scopes)
def _GetAuthToken(self):
"""Returns the auth token used for authenticating requests.
Returns:
string
"""
current_scopes = lookup_scopes(self.service)
if current_scopes:
token = self.token_store.find_token(current_scopes[0])
if hasattr(token, 'auth_header'):
return token.auth_header
return None
def _GetCaptchaToken(self):
"""Returns a captcha token if the most recent login attempt generated one.
The captcha token is only set if the Programmatic Login attempt failed
because the Google service issued a captcha challenge.
Returns:
string
"""
return self.__captcha_token
def __GetCaptchaToken(self):
return self._GetCaptchaToken()
captcha_token = property(__GetCaptchaToken,
doc="""Get the captcha token for a login request.""")
def _GetCaptchaURL(self):
"""Returns the URL of the captcha image if a login attempt generated one.
The captcha URL is only set if the Programmatic Login attempt failed
because the Google service issued a captcha challenge.
Returns:
string
"""
return self.__captcha_url
def __GetCaptchaURL(self):
return self._GetCaptchaURL()
captcha_url = property(__GetCaptchaURL,
doc="""Get the captcha URL for a login request.""")
def GetGeneratorFromLinkFinder(self, link_finder, func,
num_retries=DEFAULT_NUM_RETRIES,
delay=DEFAULT_DELAY,
backoff=DEFAULT_BACKOFF):
"""returns a generator for pagination"""
yield link_finder
next = link_finder.GetNextLink()
while next is not None:
next_feed = func(str(self.GetWithRetries(
next.href, num_retries=num_retries, delay=delay, backoff=backoff)))
yield next_feed
next = next_feed.GetNextLink()
def _GetElementGeneratorFromLinkFinder(self, link_finder, func,
num_retries=DEFAULT_NUM_RETRIES,
delay=DEFAULT_DELAY,
backoff=DEFAULT_BACKOFF):
for element in self.GetGeneratorFromLinkFinder(link_finder, func,
num_retries=num_retries,
delay=delay,
backoff=backoff).entry:
yield element
def GetOAuthInputParameters(self):
return self._oauth_input_params
def SetOAuthInputParameters(self, signature_method, consumer_key,
consumer_secret=None, rsa_key=None,
two_legged_oauth=False, requestor_id=None):
"""Sets parameters required for using OAuth authentication mechanism.
NOTE: Though consumer_secret and rsa_key are optional, either of the two
is required depending on the value of the signature_method.
Args:
signature_method: class which provides implementation for strategy class
oauth.oauth.OAuthSignatureMethod. Signature method to be used for
signing each request. Valid implementations are provided as the
constants defined by gdata.auth.OAuthSignatureMethod. Currently
they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
gdata.auth.OAuthSignatureMethod.HMAC_SHA1
consumer_key: string Domain identifying third_party web application.
consumer_secret: string (optional) Secret generated during registration.
Required only for HMAC_SHA1 signature method.
rsa_key: string (optional) Private key required for RSA_SHA1 signature
method.
two_legged_oauth: boolean (optional) Enables two-legged OAuth process.
requestor_id: string (optional) User email address to make requests on
their behalf. This parameter should only be set when two_legged_oauth
is True.
"""
self._oauth_input_params = gdata.auth.OAuthInputParams(
signature_method, consumer_key, consumer_secret=consumer_secret,
rsa_key=rsa_key, requestor_id=requestor_id)
if two_legged_oauth:
oauth_token = gdata.auth.OAuthToken(
oauth_input_params=self._oauth_input_params)
self.SetOAuthToken(oauth_token)
def FetchOAuthRequestToken(self, scopes=None, extra_parameters=None,
request_url='%s/accounts/OAuthGetRequestToken' % \
AUTH_SERVER_HOST, oauth_callback=None):
"""Fetches and sets the OAuth request token and returns it.
Args:
scopes: string or list of string base URL(s) of the service(s) to be
accessed. If None, then this method tries to determine the
scope(s) from the current service.
extra_parameters: dict (optional) key-value pairs as any additional
parameters to be included in the URL and signature while making a
request for fetching an OAuth request token. All the OAuth parameters
are added by default. But if provided through this argument, any
default parameters will be overwritten. For example, the default parameter
oauth_version '1.0' can be overwritten if
extra_parameters = {'oauth_version': '2.0'}
request_url: Request token URL. The default is
'https://www.google.com/accounts/OAuthGetRequestToken'.
oauth_callback: str (optional) If set, it is assumed the client is using
the OAuth v1.0a protocol where the callback url is sent in the
request token step. If the oauth_callback is also set in
extra_params, this value will override that one.
Returns:
The fetched request token as a gdata.auth.OAuthToken object.
Raises:
FetchingOAuthRequestTokenFailed if the server responded to the request
with an error.
"""
if scopes is None:
scopes = lookup_scopes(self.service)
if not isinstance(scopes, (list, tuple)):
scopes = [scopes,]
if oauth_callback:
if extra_parameters is not None:
extra_parameters['oauth_callback'] = oauth_callback
else:
extra_parameters = {'oauth_callback': oauth_callback}
request_token_url = gdata.auth.GenerateOAuthRequestTokenUrl(
self._oauth_input_params, scopes,
request_token_url=request_url,
extra_parameters=extra_parameters)
response = self.http_client.request('GET', str(request_token_url))
if response.status == 200:
token = gdata.auth.OAuthToken()
token.set_token_string(response.read())
token.scopes = scopes
token.oauth_input_params = self._oauth_input_params
self.SetOAuthToken(token)
return token
error = {
'status': response.status,
'reason': 'Non 200 response on fetch request token',
'body': response.read()
}
raise FetchingOAuthRequestTokenFailed(error)
def SetOAuthToken(self, oauth_token):
"""Attempts to set the current token and add it to the token store.
The oauth_token can be any OAuth token i.e. unauthorized request token,
authorized request token or access token.
This method also attempts to add the token to the token store.
Use this method any time you want the current token to point to the
oauth_token passed. For example, call this method with the request token
you receive from FetchOAuthRequestToken.
Args:
request_token: gdata.auth.OAuthToken OAuth request token.
"""
if self.auto_set_current_token:
self.current_token = oauth_token
if self.auto_store_tokens:
self.token_store.add_token(oauth_token)
def GenerateOAuthAuthorizationURL(
self, request_token=None, callback_url=None, extra_params=None,
include_scopes_in_callback=False,
scopes_param_prefix=OAUTH_SCOPE_URL_PARAM_NAME,
request_url='%s/accounts/OAuthAuthorizeToken' % AUTH_SERVER_HOST):
"""Generates URL at which user will login to authorize the request token.
Args:
request_token: gdata.auth.OAuthToken (optional) OAuth request token.
If not specified, then the current token will be used if it is of
type <gdata.auth.OAuthToken>, else it is found by looking in the
token_store by looking for a token for the current scope.
callback_url: string (optional) The URL user will be sent to after
logging in and granting access.
extra_params: dict (optional) Additional parameters to be sent.
include_scopes_in_callback: Boolean (default=False) if set to True, and
if 'callback_url' is present, the 'callback_url' will be modified to
include the scope(s) from the request token as a URL parameter. The
key for the 'callback' URL's scope parameter will be
OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
a parameter to the 'callback' URL, is that the page which receives
the OAuth token will be able to tell which URLs the token grants
access to.
scopes_param_prefix: string (default='oauth_token_scope') The URL
parameter key which maps to the list of valid scopes for the token.
This URL parameter will be included in the callback URL along with
the scopes of the token as value if include_scopes_in_callback=True.
request_url: Authorization URL. The default is
'https://www.google.com/accounts/OAuthAuthorizeToken'.
Returns:
A string URL at which the user is required to login.
Raises:
NonOAuthToken if the user's request token is not an OAuth token or if a
request token was not available.
"""
if request_token and not isinstance(request_token, gdata.auth.OAuthToken):
raise NonOAuthToken
if not request_token:
if isinstance(self.current_token, gdata.auth.OAuthToken):
request_token = self.current_token
else:
current_scopes = lookup_scopes(self.service)
if current_scopes:
token = self.token_store.find_token(current_scopes[0])
if isinstance(token, gdata.auth.OAuthToken):
request_token = token
if not request_token:
raise NonOAuthToken
return str(gdata.auth.GenerateOAuthAuthorizationUrl(
request_token,
authorization_url=request_url,
callback_url=callback_url, extra_params=extra_params,
include_scopes_in_callback=include_scopes_in_callback,
scopes_param_prefix=scopes_param_prefix))
def UpgradeToOAuthAccessToken(self, authorized_request_token=None,
request_url='%s/accounts/OAuthGetAccessToken' \
% AUTH_SERVER_HOST, oauth_version='1.0',
oauth_verifier=None):
"""Upgrades the authorized request token to an access token and returns it
Args:
authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
token. If not specified, then the current token will be used if it is
of type <gdata.auth.OAuthToken>, else it is found by looking in the
token_store by looking for a token for the current scope.
request_url: Access token URL. The default is
'https://www.google.com/accounts/OAuthGetAccessToken'.
oauth_version: str (default='1.0') oauth_version parameter. All other
'oauth_' parameters are added by default. This parameter, too, is
added by default, but here you can override its value.
oauth_verifier: str (optional) If present, it is assumed that the client
will use the OAuth v1.0a protocol which includes passing the
oauth_verifier (as returned by the SP) in the access token step.
Returns:
Access token
Raises:
NonOAuthToken if the user's authorized request token is not an OAuth
token or if an authorized request token was not available.
TokenUpgradeFailed if the server responded to the request with an
error.
"""
if (authorized_request_token and
not isinstance(authorized_request_token, gdata.auth.OAuthToken)):
raise NonOAuthToken
if not authorized_request_token:
if isinstance(self.current_token, gdata.auth.OAuthToken):
authorized_request_token = self.current_token
else:
current_scopes = lookup_scopes(self.service)
if current_scopes:
token = self.token_store.find_token(current_scopes[0])
if isinstance(token, gdata.auth.OAuthToken):
authorized_request_token = token
if not authorized_request_token:
raise NonOAuthToken
access_token_url = gdata.auth.GenerateOAuthAccessTokenUrl(
authorized_request_token,
self._oauth_input_params,
access_token_url=request_url,
oauth_version=oauth_version,
oauth_verifier=oauth_verifier)
response = self.http_client.request('GET', str(access_token_url))
if response.status == 200:
token = gdata.auth.OAuthTokenFromHttpBody(response.read())
token.scopes = authorized_request_token.scopes
token.oauth_input_params = authorized_request_token.oauth_input_params
self.SetOAuthToken(token)
return token
else:
raise TokenUpgradeFailed({'status': response.status,
'reason': 'Non 200 response on upgrade',
'body': response.read()})
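# Illustrative sketch (not part of the original methods): a typical
# three-legged OAuth flow with this class; the service name, consumer
# key/secret and scope below are placeholders, not real credentials.
#
#   client = GDataService(service='cl', source='example-app')
#   client.SetOAuthInputParameters(
#       gdata.auth.OAuthSignatureMethod.HMAC_SHA1,
#       consumer_key='example.com', consumer_secret='SECRET')
#   request_token = client.FetchOAuthRequestToken(
#       scopes=['http://www.google.com/calendar/feeds'])
#   print client.GenerateOAuthAuthorizationURL()   # send the user here
#   # ...after the user approves and is redirected back...
#   client.UpgradeToOAuthAccessToken()   # current token is now an access token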
def RevokeOAuthToken(self, request_url='%s/accounts/AuthSubRevokeToken' % \
AUTH_SERVER_HOST):
"""Revokes an existing OAuth token.
request_url: Token revoke URL. The default is
'https://www.google.com/accounts/AuthSubRevokeToken'.
Raises:
NonOAuthToken if the user's auth token is not an OAuth token.
RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
"""
scopes = lookup_scopes(self.service)
token = self.token_store.find_token(scopes[0])
if not isinstance(token, gdata.auth.OAuthToken):
raise NonOAuthToken
response = token.perform_request(self.http_client, 'GET', request_url,
headers={'Content-Type':'application/x-www-form-urlencoded'})
if response.status == 200:
self.token_store.remove_token(token)
else:
raise RevokingOAuthTokenFailed
def GetAuthSubToken(self):
"""Returns the AuthSub token as a string.
If the token is a gdata.auth.AuthSubToken, the Authorization Label
("AuthSub token") is removed.
This method examines the current_token to see if it is an AuthSubToken
or SecureAuthSubToken. If not, it searches the token_store for a token
which matches the current scope.
The current scope is determined by the service name string member.
Returns:
If the current_token is set to an AuthSubToken/SecureAuthSubToken,
return the token string. If there is no current_token, a token string
for a token which matches the service object's default scope is returned.
If there are no tokens valid for the scope, returns None.
"""
if isinstance(self.current_token, gdata.auth.AuthSubToken):
return self.current_token.get_token_string()
current_scopes = lookup_scopes(self.service)
if current_scopes:
token = self.token_store.find_token(current_scopes[0])
if isinstance(token, gdata.auth.AuthSubToken):
return token.get_token_string()
else:
token = self.token_store.find_token(atom.token_store.SCOPE_ALL)
if isinstance(token, gdata.auth.ClientLoginToken):
return token.get_token_string()
return None
def SetAuthSubToken(self, token, scopes=None, rsa_key=None):
"""Sets the token sent in requests to an AuthSub token.
Sets the current_token and attempts to add the token to the token_store.
Only use this method if you have received a token from the AuthSub
service. The auth token is set automatically when UpgradeToSessionToken()
is used. See documentation for Google AuthSub here:
http://code.google.com/apis/accounts/AuthForWebApps.html
Args:
token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
The token returned by the AuthSub service. If the token is an
AuthSubToken or SecureAuthSubToken, the scope information stored in
the token is used. If the token is a string, the scopes parameter is
used to determine the valid scopes.
scopes: list of URLs for which the token is valid. This is only used
if the token parameter is a string.
rsa_key: string (optional) Private key required for RSA_SHA1 signature
method. This parameter is necessary if the token is a string
representing a secure token.
"""
if not isinstance(token, gdata.auth.AuthSubToken):
token_string = token
if rsa_key:
token = gdata.auth.SecureAuthSubToken(rsa_key)
else:
token = gdata.auth.AuthSubToken()
token.set_token_string(token_string)
# If no scopes were set for the token, use the scopes passed in, or
# try to determine the scopes based on the current service name. If
# all else fails, set the token to match all requests.
if not token.scopes:
if scopes is None:
scopes = lookup_scopes(self.service)
if scopes is None:
scopes = [atom.token_store.SCOPE_ALL]
token.scopes = scopes
if self.auto_set_current_token:
self.current_token = token
if self.auto_store_tokens:
self.token_store.add_token(token)
def GetClientLoginToken(self):
"""Returns the token string for the current token or a token matching the
service scope.
If the current_token is a ClientLoginToken, the token string for
the current token is returned. If the current_token is not set, this method
searches for a token in the token_store which is valid for the service
object's current scope.
The current scope is determined by the service name string member.
The token string is the end of the Authorization header; it does not
include the ClientLogin label.
"""
if isinstance(self.current_token, gdata.auth.ClientLoginToken):
return self.current_token.get_token_string()
current_scopes = lookup_scopes(self.service)
if current_scopes:
token = self.token_store.find_token(current_scopes[0])
if isinstance(token, gdata.auth.ClientLoginToken):
return token.get_token_string()
else:
token = self.token_store.find_token(atom.token_store.SCOPE_ALL)
if isinstance(token, gdata.auth.ClientLoginToken):
return token.get_token_string()
return None
def SetClientLoginToken(self, token, scopes=None):
"""Sets the token sent in requests to a ClientLogin token.
This method sets the current_token to a new ClientLoginToken and it
also attempts to add the ClientLoginToken to the token_store.
Only use this method if you have received a token from the ClientLogin
service. The auth_token is set automatically when ProgrammaticLogin()
is used. See documentation for Google ClientLogin here:
http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html
Args:
token: string or instance of a ClientLoginToken.
"""
if not isinstance(token, gdata.auth.ClientLoginToken):
token_string = token
token = gdata.auth.ClientLoginToken()
token.set_token_string(token_string)
if not token.scopes:
if scopes is None:
scopes = lookup_scopes(self.service)
if scopes is None:
scopes = [atom.token_store.SCOPE_ALL]
token.scopes = scopes
if self.auto_set_current_token:
self.current_token = token
if self.auto_store_tokens:
self.token_store.add_token(token)
# Private methods to create the source property.
def __GetSource(self):
return self.__source
def __SetSource(self, new_source):
self.__source = new_source
# Update the UserAgent header to include the new application name.
self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % (
self.__source,)
source = property(__GetSource, __SetSource,
doc="""The source is the name of the application making the request.
It should be in the form company_id-app_name-app_version""")
# Authentication operations
def ProgrammaticLogin(self, captcha_token=None, captcha_response=None):
"""Authenticates the user and sets the GData Auth token.
Login retrieves a temporary auth token which must be used with all
requests to GData services. The auth token is stored in the GData client
object.
Login is also used to respond to a captcha challenge. If the user's login
attempt failed with a CaptchaRequired error, the user can respond by
calling Login with the captcha token and the answer to the challenge.
Args:
captcha_token: string (optional) The identifier for the captcha challenge
which was presented to the user.
captcha_response: string (optional) The user's answer to the captcha
challenge.
Raises:
CaptchaRequired if the login service will require a captcha response
BadAuthentication if the login service rejected the username or password
Error if the login service responded with a 403 different from the above
"""
request_body = gdata.auth.generate_client_login_request_body(self.email,
self.password, self.service, self.source, self.account_type,
captcha_token, captcha_response)
# If the user has defined their own authentication service URL,
# send the ClientLogin requests to this URL:
if not self.auth_service_url:
auth_request_url = AUTH_SERVER_HOST + '/accounts/ClientLogin'
else:
auth_request_url = self.auth_service_url
auth_response = self.http_client.request('POST', auth_request_url,
data=request_body,
headers={'Content-Type':'application/x-www-form-urlencoded'})
response_body = auth_response.read()
if auth_response.status == 200:
# TODO: insert the token into the token_store directly.
self.SetClientLoginToken(
gdata.auth.get_client_login_token(response_body))
self.__captcha_token = None
self.__captcha_url = None
elif auth_response.status == 403:
# Examine each line to find the error type and the captcha token and
# captcha URL if they are present.
captcha_parameters = gdata.auth.get_captcha_challenge(response_body,
captcha_base_url='%s/accounts/' % AUTH_SERVER_HOST)
if captcha_parameters:
self.__captcha_token = captcha_parameters['token']
self.__captcha_url = captcha_parameters['url']
raise CaptchaRequired, 'Captcha Required'
elif response_body.splitlines()[0] == 'Error=BadAuthentication':
self.__captcha_token = None
self.__captcha_url = None
raise BadAuthentication, 'Incorrect username or password'
else:
self.__captcha_token = None
self.__captcha_url = None
raise Error, 'Server responded with a 403 code'
elif auth_response.status == 302:
self.__captcha_token = None
self.__captcha_url = None
# Google tries to redirect all bad URLs back to
# http://www.google.<locale>. If a redirect
# attempt is made, assume the user has supplied an incorrect authentication URL
raise BadAuthenticationServiceURL, 'Server responded with a 302 code.'
def ClientLogin(self, username, password, account_type=None, service=None,
auth_service_url=None, source=None, captcha_token=None,
captcha_response=None):
"""Convenience method for authenticating using ProgrammaticLogin.
Sets values for email, password, and other optional members.
Args:
username:
password:
account_type: string (optional)
service: string (optional)
auth_service_url: string (optional)
captcha_token: string (optional)
captcha_response: string (optional)
"""
self.email = username
self.password = password
if account_type:
self.account_type = account_type
if service:
self.service = service
if source:
self.source = source
if auth_service_url:
self.auth_service_url = auth_service_url
self.ProgrammaticLogin(captcha_token, captcha_response)
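# Illustrative sketch (not part of the original methods): ClientLogin-based
# authentication followed by a simple query; the credentials, service name
# and feed URI are placeholders.
#
#   client = GDataService(source='example-app')
#   client.ClientLogin('user@example.com', 'password', service='cl')
#   feed = client.GetFeed('/calendar/feeds/default/private/full')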
def GenerateAuthSubURL(self, next, scope, secure=False, session=True,
domain='default'):
"""Generate a URL at which the user will login and be redirected back.
Users enter their credentials on a Google login page and a token is sent
to the URL specified in next. See documentation for AuthSub login at:
http://code.google.com/apis/accounts/docs/AuthSub.html
Args:
next: string The URL user will be sent to after logging in.
scope: string or list of strings. The URLs of the services to be
accessed.
secure: boolean (optional) Determines whether or not the issued token
is a secure token.
session: boolean (optional) Determines whether or not the issued token
can be upgraded to a session token.
"""
if not isinstance(scope, (list, tuple)):
scope = (scope,)
return gdata.auth.generate_auth_sub_url(next, scope, secure=secure,
session=session,
request_url='%s/accounts/AuthSubRequest' % AUTH_SERVER_HOST,
domain=domain)
def UpgradeToSessionToken(self, token=None):
"""Upgrades a single use AuthSub token to a session token.
Args:
token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
(optional) which is good for a single use but can be upgraded
to a session token. If no token is passed in, the token
is found by looking in the token_store by looking for a token
for the current scope.
Raises:
NonAuthSubToken if the user's auth token is not an AuthSub token
TokenUpgradeFailed if the server responded to the request with an
error.
"""
if token is None:
scopes = lookup_scopes(self.service)
if scopes:
token = self.token_store.find_token(scopes[0])
else:
token = self.token_store.find_token(atom.token_store.SCOPE_ALL)
if not isinstance(token, gdata.auth.AuthSubToken):
raise NonAuthSubToken
self.SetAuthSubToken(self.upgrade_to_session_token(token))
def upgrade_to_session_token(self, token):
"""Upgrades a single use AuthSub token to a session token.
Args:
token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
which is good for a single use but can be upgraded to a
session token.
Returns:
The upgraded token as a gdata.auth.AuthSubToken object.
Raises:
TokenUpgradeFailed if the server responded to the request with an
error.
"""
response = token.perform_request(self.http_client, 'GET',
AUTH_SERVER_HOST + '/accounts/AuthSubSessionToken',
headers={'Content-Type':'application/x-www-form-urlencoded'})
response_body = response.read()
if response.status == 200:
token.set_token_string(
gdata.auth.token_from_http_body(response_body))
return token
else:
raise TokenUpgradeFailed({'status': response.status,
'reason': 'Non 200 response on upgrade',
'body': response_body})
def RevokeAuthSubToken(self):
"""Revokes an existing AuthSub token.
Raises:
NonAuthSubToken if the user's auth token is not an AuthSub token
"""
scopes = lookup_scopes(self.service)
token = self.token_store.find_token(scopes[0])
if not isinstance(token, gdata.auth.AuthSubToken):
raise NonAuthSubToken
response = token.perform_request(self.http_client, 'GET',
AUTH_SERVER_HOST + '/accounts/AuthSubRevokeToken',
headers={'Content-Type':'application/x-www-form-urlencoded'})
if response.status == 200:
self.token_store.remove_token(token)
def AuthSubTokenInfo(self):
"""Fetches the AuthSub token's metadata from the server.
Raises:
NonAuthSubToken if the user's auth token is not an AuthSub token
"""
scopes = lookup_scopes(self.service)
token = self.token_store.find_token(scopes[0])
if not isinstance(token, gdata.auth.AuthSubToken):
raise NonAuthSubToken
response = token.perform_request(self.http_client, 'GET',
AUTH_SERVER_HOST + '/accounts/AuthSubTokenInfo',
headers={'Content-Type':'application/x-www-form-urlencoded'})
result_body = response.read()
if response.status == 200:
return result_body
else:
raise RequestError, {'status': response.status,
'body': result_body}
def GetWithRetries(self, uri, extra_headers=None, redirects_remaining=4,
encoding='UTF-8', converter=None, num_retries=DEFAULT_NUM_RETRIES,
delay=DEFAULT_DELAY, backoff=DEFAULT_BACKOFF, logger=None):
"""This is a wrapper method for Get with retrying capability.
It retries the request a specified number of times in order to avoid
transient errors while retrieving bulk entities.
Note this method relies on the time module and so may not be usable
by default in Python2.2.
Args:
num_retries: Integer; the retry count.
delay: Integer; the initial delay for retrying.
backoff: Integer; how much the delay should lengthen after each failure.
logger: An object which has a debug(str) method to receive logging
messages. Recommended that you pass in the logging module.
Raises:
ValueError if any of the parameters has an invalid value.
RanOutOfTries on failure after number of retries.
"""
# Moved import for time module inside this method since time is not a
# default module in Python2.2. This method will not be usable in
# Python2.2.
import time
if backoff <= 1:
raise ValueError("backoff must be greater than 1")
num_retries = int(num_retries)
if num_retries < 0:
raise ValueError("num_retries must be 0 or greater")
if delay <= 0:
raise ValueError("delay must be greater than 0")
# Let's start
mtries, mdelay = num_retries, delay
while mtries > 0:
if mtries != num_retries:
if logger:
logger.debug("Retrying: %s" % uri)
try:
rv = self.Get(uri, extra_headers=extra_headers,
redirects_remaining=redirects_remaining,
encoding=encoding, converter=converter)
except SystemExit:
# Allow this error
raise
except RequestError, e:
# Error 500 is 'internal server error' and warrants a retry
# Error 503 is 'service unavailable' and warrants a retry
if e[0]['status'] not in [500, 503]:
raise e
# Else, fall through to the retry code...
except Exception, e:
if logger:
logger.debug(e)
# Fall through to the retry code...
else:
# This is the right path.
return rv
mtries -= 1
time.sleep(mdelay)
mdelay *= backoff
raise RanOutOfTries('Ran out of tries.')
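# Illustrative sketch (not part of the original methods): retrying a bulk
# fetch with exponential backoff, assuming an authenticated GDataService
# instance named client; the query URI is a placeholder and the logging
# module is passed in so that retries are reported.
#
#   import logging
#   feed = client.GetWithRetries('/base/feeds/snippets?bq=digital+camera',
#                                num_retries=5, delay=1, backoff=2,
#                                converter=gdata.GDataFeedFromString,
#                                logger=logging)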
# CRUD operations
def Get(self, uri, extra_headers=None, redirects_remaining=4,
encoding='UTF-8', converter=None):
"""Query the GData API with the given URI
The uri is the portion of the URI after the server value
(ex: www.google.com).
To perform a query against Google Base, set the server to
'base.google.com' and set the uri to '/base/feeds/...', where ... is
your query. For example, to find snippets for all digital cameras uri
should be set to: '/base/feeds/snippets?bq=digital+camera'
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dictionary (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
redirects_remaining: int (optional) Tracks the number of additional
redirects this method will allow. If the service object receives
a redirect and remaining is 0, it will not follow the redirect.
This was added to avoid infinite redirect loops.
encoding: string (optional) The character encoding for the server's
response. Default is UTF-8
converter: func (optional) A function which will transform
the server's results before it is returned. Example: use
GDataFeedFromString to parse the server response as if it
were a GDataFeed.
Returns:
If there is no ResultsTransformer specified in the call, a GDataFeed
or GDataEntry depending on which is sent from the server. If the
response is neither a feed nor an entry and there is no ResultsTransformer,
return a string. If there is a ResultsTransformer, the returned value
will be that of the ResultsTransformer function.
"""
if extra_headers is None:
extra_headers = {}
if self.__gsessionid is not None:
if uri.find('gsessionid=') < 0:
if uri.find('?') > -1:
uri += '&gsessionid=%s' % (self.__gsessionid,)
else:
uri += '?gsessionid=%s' % (self.__gsessionid,)
server_response = self.request('GET', uri,
headers=extra_headers)
result_body = server_response.read()
if server_response.status == 200:
if converter:
return converter(result_body)
# There was no ResultsTransformer specified, so try to convert the
# server's response into a GDataFeed.
feed = gdata.GDataFeedFromString(result_body)
if not feed:
# If conversion to a GDataFeed failed, try to convert the server's
# response to a GDataEntry.
entry = gdata.GDataEntryFromString(result_body)
if not entry:
# The server's response wasn't a feed, or an entry, so return the
# response body as a string.
return result_body
return entry
return feed
elif server_response.status == 302:
if redirects_remaining > 0:
location = (server_response.getheader('Location')
or server_response.getheader('location'))
if location is not None:
m = re.compile('[\?\&]gsessionid=(\w*\-)').search(location)
if m is not None:
self.__gsessionid = m.group(1)
return GDataService.Get(self, location, extra_headers, redirects_remaining - 1,
encoding=encoding, converter=converter)
else:
raise RequestError, {'status': server_response.status,
'reason': '302 received without Location header',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': 'Redirect received, but redirects_remaining <= 0',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': server_response.reason, 'body': result_body}
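# Illustrative sketch (not part of the original methods): a plain Get call,
# assuming an authenticated GDataService instance named client; the query URI
# is a placeholder.
#
#   result = client.Get('/base/feeds/snippets?bq=digital+camera')
#   if isinstance(result, gdata.GDataFeed):
#     for entry in result.entry:
#       print entry.title.text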
def GetMedia(self, uri, extra_headers=None):
"""Returns a MediaSource containing media and its metadata from the given
URI string.
"""
response_handle = self.request('GET', uri,
headers=extra_headers)
return gdata.MediaSource(response_handle, response_handle.getheader(
'Content-Type'),
response_handle.getheader('Content-Length'))
def GetEntry(self, uri, extra_headers=None):
"""Query the GData API with the given URI and receive an Entry.
See also documentation for gdata.service.Get
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dictionary (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
Returns:
A GDataEntry built from the XML in the server's response.
"""
result = GDataService.Get(self, uri, extra_headers,
converter=atom.EntryFromString)
if isinstance(result, atom.Entry):
return result
else:
raise UnexpectedReturnType, 'Server did not send an entry'
def GetFeed(self, uri, extra_headers=None,
converter=gdata.GDataFeedFromString):
"""Query the GData API with the given URI and receive a Feed.
See also documentation for gdata.service.Get
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dictionary (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
Returns:
A GDataFeed built from the XML in the server's response.
"""
result = GDataService.Get(self, uri, extra_headers, converter=converter)
if isinstance(result, atom.Feed):
return result
else:
raise UnexpectedReturnType, 'Server did not send a feed'
def GetNext(self, feed):
"""Requests the next 'page' of results in the feed.
This method uses the feed's next link to request an additional feed
and uses the class of the feed to convert the results of the GET request.
Args:
feed: atom.Feed or a subclass. The feed should contain a next link and
the type of the feed will be applied to the results from the
server. The new feed which is returned will be of the same class
as this feed which was passed in.
Returns:
A new feed representing the next set of results in the server's feed.
The type of this feed will match that of the feed argument.
"""
next_link = feed.GetNextLink()
# Create a closure which will convert an XML string to the class of
# the feed object passed in.
def ConvertToFeedClass(xml_string):
return atom.CreateClassFromXMLString(feed.__class__, xml_string)
# Make a GET request on the next link and use the above closure for the
# converter which processes the XML string from the server.
if next_link and next_link.href:
return GDataService.Get(self, next_link.href,
converter=ConvertToFeedClass)
else:
return None
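# Illustrative sketch (not part of the original methods): walking every page
# of a feed by following next links, assuming "client" and "feed" from a
# previous GetFeed call; process() is an assumed callback.
#
#   while feed is not None:
#     for entry in feed.entry:
#       process(entry)
#     feed = client.GetNext(feed)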
def Post(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, redirects_remaining=4, media_source=None,
converter=None):
"""Insert or update data into a GData service at the given URI.
Args:
data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
XML to be sent to the uri.
uri: string The location (feed) to which the data should be inserted.
Example: '/base/feeds/items'.
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
media_source: MediaSource (optional) Container for the media to be sent
along with the entry, if provided.
converter: func (optional) A function which will be executed on the
server's response. Often this is a function like
GDataEntryFromString which will parse the body of the server's
response and return a GDataEntry.
Returns:
If the post succeeded, this method will return a GDataFeed, GDataEntry,
or the results of running converter on the server's result body (if
converter was specified).
"""
return GDataService.PostOrPut(self, 'POST', data, uri,
extra_headers=extra_headers, url_params=url_params,
escape_params=escape_params, redirects_remaining=redirects_remaining,
media_source=media_source, converter=converter)
def PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None,
escape_params=True, redirects_remaining=4, media_source=None,
converter=None):
"""Insert data into a GData service at the given URI.
Args:
verb: string, either 'POST' or 'PUT'
data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
XML to be sent to the uri.
uri: string The location (feed) to which the data should be inserted.
Example: '/base/feeds/items'.
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
media_source: MediaSource (optional) Container for the media to be sent
along with the entry, if provided.
converter: func (optional) A function which will be executed on the
server's response. Often this is a function like
GDataEntryFromString which will parse the body of the server's
response and return a GDataEntry.
Returns:
If the post succeeded, this method will return a GDataFeed, GDataEntry,
or the results of running converter on the server's result body (if
converter was specified).
"""
if extra_headers is None:
extra_headers = {}
if self.__gsessionid is not None:
if uri.find('gsessionid=') < 0:
if url_params is None:
url_params = {}
url_params['gsessionid'] = self.__gsessionid
if data and media_source:
if ElementTree.iselement(data):
data_str = ElementTree.tostring(data)
else:
data_str = str(data)
multipart = []
multipart.append('Media multipart posting\r\n--END_OF_PART\r\n' + \
'Content-Type: application/atom+xml\r\n\r\n')
multipart.append('\r\n--END_OF_PART\r\nContent-Type: ' + \
media_source.content_type+'\r\n\r\n')
multipart.append('\r\n--END_OF_PART--\r\n')
extra_headers['MIME-version'] = '1.0'
extra_headers['Content-Length'] = str(len(multipart[0]) +
len(multipart[1]) + len(multipart[2]) +
len(data_str) + media_source.content_length)
extra_headers['Content-Type'] = 'multipart/related; boundary=END_OF_PART'
server_response = self.request(verb, uri,
data=[multipart[0], data_str, multipart[1], media_source.file_handle,
multipart[2]], headers=extra_headers, url_params=url_params)
result_body = server_response.read()
elif media_source or isinstance(data, gdata.MediaSource):
if isinstance(data, gdata.MediaSource):
media_source = data
extra_headers['Content-Length'] = str(media_source.content_length)
extra_headers['Content-Type'] = media_source.content_type
server_response = self.request(verb, uri,
data=media_source.file_handle, headers=extra_headers,
url_params=url_params)
result_body = server_response.read()
else:
http_data = data
if 'Content-Type' not in extra_headers:
content_type = 'application/atom+xml'
extra_headers['Content-Type'] = content_type
server_response = self.request(verb, uri, data=http_data,
headers=extra_headers, url_params=url_params)
result_body = server_response.read()
# Server returns 201 for most post requests, but when performing a batch
# request the server responds with a 200 on success.
if server_response.status == 201 or server_response.status == 200:
if converter:
return converter(result_body)
feed = gdata.GDataFeedFromString(result_body)
if not feed:
entry = gdata.GDataEntryFromString(result_body)
if not entry:
return result_body
return entry
return feed
elif server_response.status == 302:
if redirects_remaining > 0:
location = (server_response.getheader('Location')
or server_response.getheader('location'))
if location is not None:
m = re.compile('[\?\&]gsessionid=(\w*\-)').search(location)
if m is not None:
self.__gsessionid = m.group(1)
return GDataService.PostOrPut(self, verb, data, location,
extra_headers, url_params, escape_params,
redirects_remaining - 1, media_source, converter=converter)
else:
raise RequestError, {'status': server_response.status,
'reason': '302 received without Location header',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': 'Redirect received, but redirects_remaining <= 0',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': server_response.reason, 'body': result_body}
def Put(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, redirects_remaining=3, media_source=None,
converter=None):
"""Updates an entry at the given URI.
Args:
data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The
XML containing the updated data.
uri: string A URI indicating entry to which the update will be applied.
Example: '/base/feeds/items/ITEM-ID'
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
converter: func (optional) A function which will be executed on the
server's response. Often this is a function like
GDataEntryFromString which will parse the body of the server's
response and return a GDataEntry.
Returns:
If the put succeeded, this method will return a GDataFeed, GDataEntry,
or the results of running converter on the server's result body (if
converter was specified).
"""
return GDataService.PostOrPut(self, 'PUT', data, uri,
extra_headers=extra_headers, url_params=url_params,
escape_params=escape_params, redirects_remaining=redirects_remaining,
media_source=media_source, converter=converter)
def Delete(self, uri, extra_headers=None, url_params=None,
escape_params=True, redirects_remaining=4):
"""Deletes the entry at the given URI.
Args:
uri: string The URI of the entry to be deleted. Example:
'/base/feeds/items/ITEM-ID'
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type and
Authorization headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
True if the entry was deleted.
"""
if extra_headers is None:
extra_headers = {}
if self.__gsessionid is not None:
if uri.find('gsessionid=') < 0:
if url_params is None:
url_params = {}
url_params['gsessionid'] = self.__gsessionid
server_response = self.request('DELETE', uri,
headers=extra_headers, url_params=url_params)
result_body = server_response.read()
if server_response.status == 200:
return True
elif server_response.status == 302:
if redirects_remaining > 0:
location = (server_response.getheader('Location')
or server_response.getheader('location'))
if location is not None:
m = re.compile('[\?\&]gsessionid=(\w*\-)').search(location)
if m is not None:
self.__gsessionid = m.group(1)
return GDataService.Delete(self, location, extra_headers,
url_params, escape_params, redirects_remaining - 1)
else:
raise RequestError, {'status': server_response.status,
'reason': '302 received without Location header',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': 'Redirect received, but redirects_remaining <= 0',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': server_response.reason, 'body': result_body}
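# Illustrative usage sketch (not part of the original module). Assumes an
# already-authenticated GDataService instance named `client` and that
# `entry_xml` holds the updated entry XML; GDataEntryFromString is the
# converter suggested by the Put docstring above.
#
#   updated = client.Put(entry_xml, '/base/feeds/items/ITEM-ID',
#                        converter=GDataEntryFromString)
#   deleted = client.Delete('/base/feeds/items/ITEM-ID')  # True on HTTP 200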
def ExtractToken(url, scopes_included_in_next=True):
"""Gets the AuthSub token from the current page's URL.
Designed to be used on the URL that the browser is sent to after the user
authorizes this application at the page given by GenerateAuthSubRequestUrl.
Args:
url: The current page's URL. It should contain the token as a URL
parameter. Example: 'http://example.com/?...&token=abcd435'
scopes_included_in_next: If True, this function looks for a scope value
associated with the token. The scope is a URL parameter with the
key set to SCOPE_URL_PARAM_NAME. This parameter should be present
if the AuthSub request URL was generated using
GenerateAuthSubRequestUrl with include_scope_in_next set to True.
Returns:
A tuple containing the token string and a list of scope strings for which
this token should be valid. If the scope was not included in the URL, the
tuple will contain (token, None).
"""
parsed = urlparse.urlparse(url)
token = gdata.auth.AuthSubTokenFromUrl(parsed[4])
scopes = ''
if scopes_included_in_next:
for pair in parsed[4].split('&'):
if pair.startswith('%s=' % SCOPE_URL_PARAM_NAME):
scopes = urllib.unquote_plus(pair.split('=')[1])
return (token, scopes.split(' '))
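# Illustrative usage sketch (not part of the original module). After the user
# authorizes the application, the browser lands on the 'next' URL with the
# token (and optionally the scope) as URL parameters; the redirect URL below
# is a hypothetical example.
#
#   token, scopes = ExtractToken(
#       'http://example.com/callback?token=abcd435&%s=%s' % (
#           SCOPE_URL_PARAM_NAME,
#           urllib.quote_plus('http://www.google.com/calendar/feeds')))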
def GenerateAuthSubRequestUrl(next, scopes, hd='default', secure=False,
session=True, request_url='https://www.google.com/accounts/AuthSubRequest',
include_scopes_in_next=True):
"""Creates a URL to request an AuthSub token to access Google services.
For more details on AuthSub, see the documentation here:
http://code.google.com/apis/accounts/docs/AuthSub.html
Args:
next: The URL where the browser should be sent after the user authorizes
the application. This page is responsible for receiving the token
which is embedded in the URL as a parameter.
scopes: The base URL to which access will be granted. Example:
'http://www.google.com/calendar/feeds' will grant access to all
URLs in the Google Calendar data API. If you would like a token for
multiple scopes, pass in a list of URL strings.
hd: The domain to which the user's account belongs. This is set to the
domain name if you are using Google Apps. Example: 'example.org'
Defaults to 'default'
secure: If set to True, all requests should be signed. The default is
False.
session: If set to True, the token received by the 'next' URL can be
upgraded to a multiuse session token. If session is set to False, the
token may only be used once and cannot be upgraded. Default is True.
request_url: The base of the URL where the user will be sent to authorize
this application to access their data. The default is
'https://www.google.com/accounts/AuthSubRequest'.
include_scopes_in_next: Boolean. If set to True, the 'next' parameter will
be modified to include the requested scope as a URL parameter. The
key for the next URL's scope parameter will be SCOPE_URL_PARAM_NAME. The
benefit of including the scope URL as a parameter to the next URL is
that the page which receives the AuthSub token will be able to tell
which URLs the token grants access to.
Returns:
A URL string to which the browser should be sent.
"""
if isinstance(scopes, list):
scope = ' '.join(scopes)
else:
scope = scopes
if include_scopes_in_next:
if next.find('?') > -1:
next += '&%s' % urllib.urlencode({SCOPE_URL_PARAM_NAME:scope})
else:
next += '?%s' % urllib.urlencode({SCOPE_URL_PARAM_NAME:scope})
return gdata.auth.GenerateAuthSubUrl(next=next, scope=scope, secure=secure,
session=session, request_url=request_url, domain=hd)
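# Illustrative usage sketch (not part of the original module): build an
# AuthSub request URL for the Calendar feeds scope and redirect the user's
# browser to it; the token then arrives on the 'next' callback URL.
#
#   auth_url = GenerateAuthSubRequestUrl(
#       'http://example.com/callback',
#       'http://www.google.com/calendar/feeds',
#       secure=False, session=True)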
class Query(dict):
"""Constructs a query URL to be used in GET requests
URL parameters are created by adding key-value pairs to this object as a
dict. For example, to add &max-results=25 to the URL do
my_query['max-results'] = 25
Category queries are created by adding category strings to the categories
member. All items in the categories list will be concatenated with the /
symbol (symbolizing a category x AND y restriction). If you would like to OR
two categories, append them as one string with a | between the categories.
For example, do query.categories.append('Fritz|Laurie') to create a query
like feed/-/Fritz%7CLaurie. This query will return results that are in
either category.
"""
def __init__(self, feed=None, text_query=None, params=None,
categories=None):
"""Constructor for Query
Args:
feed: str (optional) The path for the feed (Examples:
'/base/feeds/snippets' or 'calendar/feeds/[email protected]/private/full')
text_query: str (optional) The contents of the q query parameter. The
contents of the text_query are URL escaped upon conversion to a URI.
params: dict (optional) Parameter value string pairs which become URL
params when translated to a URI. These parameters are added to the
query's items (key-value pairs).
categories: list (optional) List of category strings which should be
included as query categories. See
http://code.google.com/apis/gdata/reference.html#Queries for
details. If you want to get results from category A or B (both
categories), specify a single list item 'A|B'.
"""
self.feed = feed
self.categories = []
if text_query:
self.text_query = text_query
if isinstance(params, dict):
for param in params:
self[param] = params[param]
if isinstance(categories, list):
for category in categories:
self.categories.append(category)
def _GetTextQuery(self):
if 'q' in self.keys():
return self['q']
else:
return None
def _SetTextQuery(self, query):
self['q'] = query
text_query = property(_GetTextQuery, _SetTextQuery,
doc="""The feed query's q parameter""")
def _GetAuthor(self):
if 'author' in self.keys():
return self['author']
else:
return None
def _SetAuthor(self, query):
self['author'] = query
author = property(_GetAuthor, _SetAuthor,
doc="""The feed query's author parameter""")
def _GetAlt(self):
if 'alt' in self.keys():
return self['alt']
else:
return None
def _SetAlt(self, query):
self['alt'] = query
alt = property(_GetAlt, _SetAlt,
doc="""The feed query's alt parameter""")
def _GetUpdatedMin(self):
if 'updated-min' in self.keys():
return self['updated-min']
else:
return None
def _SetUpdatedMin(self, query):
self['updated-min'] = query
updated_min = property(_GetUpdatedMin, _SetUpdatedMin,
doc="""The feed query's updated-min parameter""")
def _GetUpdatedMax(self):
if 'updated-max' in self.keys():
return self['updated-max']
else:
return None
def _SetUpdatedMax(self, query):
self['updated-max'] = query
updated_max = property(_GetUpdatedMax, _SetUpdatedMax,
doc="""The feed query's updated-max parameter""")
def _GetPublishedMin(self):
if 'published-min' in self.keys():
return self['published-min']
else:
return None
def _SetPublishedMin(self, query):
self['published-min'] = query
published_min = property(_GetPublishedMin, _SetPublishedMin,
doc="""The feed query's published-min parameter""")
def _GetPublishedMax(self):
if 'published-max' in self.keys():
return self['published-max']
else:
return None
def _SetPublishedMax(self, query):
self['published-max'] = query
published_max = property(_GetPublishedMax, _SetPublishedMax,
doc="""The feed query's published-max parameter""")
def _GetStartIndex(self):
if 'start-index' in self.keys():
return self['start-index']
else:
return None
def _SetStartIndex(self, query):
if not isinstance(query, str):
query = str(query)
self['start-index'] = query
start_index = property(_GetStartIndex, _SetStartIndex,
doc="""The feed query's start-index parameter""")
def _GetMaxResults(self):
if 'max-results' in self.keys():
return self['max-results']
else:
return None
def _SetMaxResults(self, query):
if not isinstance(query, str):
query = str(query)
self['max-results'] = query
max_results = property(_GetMaxResults, _SetMaxResults,
doc="""The feed query's max-results parameter""")
def _GetOrderBy(self):
if 'orderby' in self.keys():
return self['orderby']
else:
return None
def _SetOrderBy(self, query):
self['orderby'] = query
orderby = property(_GetOrderBy, _SetOrderBy,
doc="""The feed query's orderby parameter""")
def ToUri(self):
q_feed = self.feed or ''
category_string = '/'.join(
[urllib.quote_plus(c) for c in self.categories])
# Add categories to the feed if there are any.
if len(self.categories) > 0:
q_feed = q_feed + '/-/' + category_string
return atom.service.BuildUri(q_feed, self)
def __str__(self):
return self.ToUri()
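# Illustrative usage sketch (not part of the original module): build a query
# URI with a text query, an extra URL parameter, and a category restriction.
# The exact parameter ordering of the generated URI may differ.
#
#   query = Query(feed='/base/feeds/snippets', text_query='digital camera')
#   query['max-results'] = '25'
#   query.categories.append('products')
#   uri = query.ToUri()
#   # e.g. '/base/feeds/snippets/-/products?q=digital+camera&max-results=25'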
|
apache-2.0
|
arnondora/wordpress-paper-theme
|
PaperTheme/node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py
|
1355
|
44604
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cmake output module
This module is under development and should be considered experimental.
This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is
created for each configuration.
This module's original purpose was to support editing in IDEs like KDevelop
which use CMake for project management. It is also possible to use CMake to
generate projects for other IDEs such as eclipse cdt and code::blocks. QtCreator
will convert the CMakeLists.txt to a code::blocks cbp for the editor to read,
but build using CMake. As a result, the QtCreator editor is unaware of compiler
defines. The generated CMakeLists.txt can also be used to build on Linux. There
is currently no support for building on platforms other than Linux.
The generated CMakeLists.txt should properly compile all projects. However,
there is a mismatch between gyp and cmake with regard to linking. All attempts
are made to work around this, but CMake sometimes sees -Wl,--start-group as a
library and incorrectly repeats it. As a result the output of this generator
should not be relied on for building.
When using this generator with kdevelop, use version 4.4+. Previous versions of kdevelop will
not be able to find the header file directories described in the generated
CMakeLists.txt file.
"""
import multiprocessing
import os
import signal
import string
import subprocess
import gyp.common
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
'SHARED_LIB_SUFFIX': '.so',
'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}',
'LIB_DIR': '${obj}.${TOOLSET}',
'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni',
'SHARED_INTERMEDIATE_DIR': '${obj}/gen',
'PRODUCT_DIR': '${builddir}',
'RULE_INPUT_PATH': '${RULE_INPUT_PATH}',
'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}',
'RULE_INPUT_NAME': '${RULE_INPUT_NAME}',
'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}',
'RULE_INPUT_EXT': '${RULE_INPUT_EXT}',
'CONFIGURATION_NAME': '${configuration}',
}
FULL_PATH_VARS = ('${CMAKE_CURRENT_LIST_DIR}', '${builddir}', '${obj}')
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = True
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 's', # cc
'.S': 's', # cc
}
def RemovePrefix(a, prefix):
"""Returns 'a' without 'prefix' if it starts with 'prefix'."""
return a[len(prefix):] if a.startswith(prefix) else a
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def NormjoinPathForceCMakeSource(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
If rel_path is an absolute path it is returned unchanged.
Otherwise it is resolved against base_path and normalized.
If the result is a relative path, it is forced to be relative to the
CMakeLists.txt.
"""
if os.path.isabs(rel_path):
return rel_path
if any([rel_path.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
# TODO: do we need to check base_path for absolute variables as well?
return os.path.join('${CMAKE_CURRENT_LIST_DIR}',
os.path.normpath(os.path.join(base_path, rel_path)))
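# Illustrative behavior sketch (not part of the original module):
#
#   NormjoinPathForceCMakeSource('../..', 'foo/bar.cc')
#   # -> '${CMAKE_CURRENT_LIST_DIR}/../../foo/bar.cc'
#   NormjoinPathForceCMakeSource('../..', '/abs/path.cc')    # -> '/abs/path.cc'
#   NormjoinPathForceCMakeSource('../..', '${obj}/gen/a.cc') # -> unchanged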
def NormjoinPath(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
TODO: what is this really used for?
If rel_path begins with '$' it is returned unchanged.
Otherwise it is resolved against base_path if relative, then normalized.
"""
if rel_path.startswith('$') and not rel_path.startswith('${configuration}'):
return rel_path
return os.path.normpath(os.path.join(base_path, rel_path))
def CMakeStringEscape(a):
"""Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
The following are yet unknown
'$' generator variables (like ${obj}) must not be escaped,
but text $ should be escaped
what is wanted is to know which $ come from generator variables
"""
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
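# Illustrative behavior sketch (not part of the original module): backslash,
# semicolon and double quote are escaped, everything else is left alone.
#
#   CMakeStringEscape('a;b "c"')   # -> 'a\\;b \\"c\\"'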
def SetFileProperty(output, source_name, property_name, values, sep):
"""Given a set of source file, sets the given property on them."""
output.write('set_source_files_properties(')
output.write(source_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetFilesProperty(output, variable, property_name, values, sep):
"""Given a set of source files, sets the given property on them."""
output.write('set_source_files_properties(')
WriteVariable(output, variable)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetTargetProperty(output, target_name, property_name, values, sep=''):
"""Given a target, sets the given property."""
output.write('set_target_properties(')
output.write(target_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetVariable(output, variable_name, value):
"""Sets a CMake variable."""
output.write('set(')
output.write(variable_name)
output.write(' "')
output.write(CMakeStringEscape(value))
output.write('")\n')
def SetVariableList(output, variable_name, values):
"""Sets a CMake variable to a list."""
if not values:
return SetVariable(output, variable_name, "")
if len(values) == 1:
return SetVariable(output, variable_name, values[0])
output.write('list(APPEND ')
output.write(variable_name)
output.write('\n "')
output.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
output.write('")\n')
def UnsetVariable(output, variable_name):
"""Unsets a CMake variable."""
output.write('unset(')
output.write(variable_name)
output.write(')\n')
def WriteVariable(output, variable_name, prepend=None):
if prepend:
output.write(prepend)
output.write('${')
output.write(variable_name)
output.write('}')
class CMakeTargetType(object):
def __init__(self, command, modifier, property_modifier):
self.command = command
self.modifier = modifier
self.property_modifier = property_modifier
cmake_target_type_from_gyp_target_type = {
'executable': CMakeTargetType('add_executable', None, 'RUNTIME'),
'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'),
'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'),
'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'),
'none': CMakeTargetType('add_custom_target', 'SOURCES', None),
}
def StringToCMakeTargetName(a):
"""Converts the given string 'a' to a valid CMake target name.
All invalid characters are replaced by '_'.
Invalid for cmake: ' ', '/', '(', ')', '"'
Invalid for make: ':'
Invalid for unknown reasons but cause failures: '.'
"""
return a.translate(string.maketrans(' /():."', '_______'))
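# Illustrative behavior sketch (not part of the original module):
#
#   StringToCMakeTargetName('chrome/chrome.gyp:chrome (run)')
#   # -> 'chrome_chrome_gyp_chrome__run_'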
def WriteActions(target_name, actions, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'actions' in the target.
Args:
target_name: the name of the CMake target being generated.
actions: the Gyp 'actions' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for action in actions:
action_name = StringToCMakeTargetName(action['action_name'])
action_target_name = '%s__%s' % (target_name, action_name)
inputs = action['inputs']
inputs_name = action_target_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = action['outputs']
cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out)
for out in outputs]
outputs_name = action_target_name + '__output'
SetVariableList(output, outputs_name, cmake_outputs)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources.extend(zip(cmake_outputs, outputs))
# add_custom_command
output.write('add_custom_command(OUTPUT ')
WriteVariable(output, outputs_name)
output.write('\n')
if len(dirs) > 0:
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(action['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write('\n')
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in action:
output.write(action['message'])
else:
output.write(action_target_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(action_target_name)
output.write('\n DEPENDS ')
WriteVariable(output, outputs_name)
output.write('\n SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n)\n')
extra_deps.append(action_target_name)
def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source):
if rel_path.startswith(("${RULE_INPUT_PATH}","${RULE_INPUT_DIRNAME}")):
if any([rule_source.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
return NormjoinPathForceCMakeSource(base_path, rel_path)
def WriteRules(target_name, rules, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'rules' in the target.
Args:
target_name: the name of the CMake target being generated.
rules: the Gyp 'rules' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for rule in rules:
rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name'])
inputs = rule.get('inputs', [])
inputs_name = rule_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = rule['outputs']
var_outputs = []
for count, rule_source in enumerate(rule.get('rule_sources', [])):
action_name = rule_name + '_' + str(count)
rule_source_dirname, rule_source_basename = os.path.split(rule_source)
rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename)
SetVariable(output, 'RULE_INPUT_PATH', rule_source)
SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname)
SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename)
SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root)
SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
# Create variables for the output, as 'local' variable will be unset.
these_outputs = []
for output_index, out in enumerate(outputs):
output_name = action_name + '_' + str(output_index)
SetVariable(output, output_name,
NormjoinRulePathForceCMakeSource(path_to_gyp, out,
rule_source))
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.append(('${' + output_name + '}', out))
these_outputs.append('${' + output_name + '}')
var_outputs.append('${' + output_name + '}')
# add_custom_command
output.write('add_custom_command(OUTPUT\n')
for out in these_outputs:
output.write(' ')
output.write(out)
output.write('\n')
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(rule['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
# CMAKE_CURRENT_LIST_DIR is where the CMakeLists.txt lives.
# The cwd is the current build directory.
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in rule:
output.write(rule['message'])
else:
output.write(action_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
UnsetVariable(output, 'RULE_INPUT_PATH')
UnsetVariable(output, 'RULE_INPUT_DIRNAME')
UnsetVariable(output, 'RULE_INPUT_NAME')
UnsetVariable(output, 'RULE_INPUT_ROOT')
UnsetVariable(output, 'RULE_INPUT_EXT')
# add_custom_target
output.write('add_custom_target(')
output.write(rule_name)
output.write(' DEPENDS\n')
for out in var_outputs:
output.write(' ')
output.write(out)
output.write('\n')
output.write('SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n')
for rule_source in rule.get('rule_sources', []):
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
output.write(')\n')
extra_deps.append(rule_name)
def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output):
"""Write CMake for the 'copies' in the target.
Args:
target_name: the name of the CMake target being generated.
copies: the Gyp 'copies' dict for this target.
extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
copy_name = target_name + '__copies'
# CMake gets upset with custom targets with OUTPUT which specify no output.
have_copies = any(copy['files'] for copy in copies)
if not have_copies:
output.write('add_custom_target(')
output.write(copy_name)
output.write(')\n')
extra_deps.append(copy_name)
return
class Copy(object):
def __init__(self, ext, command):
self.cmake_inputs = []
self.cmake_outputs = []
self.gyp_inputs = []
self.gyp_outputs = []
self.ext = ext
self.inputs_name = None
self.outputs_name = None
self.command = command
file_copy = Copy('', 'copy')
dir_copy = Copy('_dirs', 'copy_directory')
for copy in copies:
files = copy['files']
destination = copy['destination']
for src in files:
path = os.path.normpath(src)
basename = os.path.split(path)[1]
dst = os.path.join(destination, basename)
copy = file_copy if os.path.basename(src) else dir_copy
copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src))
copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst))
copy.gyp_inputs.append(src)
copy.gyp_outputs.append(dst)
for copy in (file_copy, dir_copy):
if copy.cmake_inputs:
copy.inputs_name = copy_name + '__input' + copy.ext
SetVariableList(output, copy.inputs_name, copy.cmake_inputs)
copy.outputs_name = copy_name + '__output' + copy.ext
SetVariableList(output, copy.outputs_name, copy.cmake_outputs)
# add_custom_command
output.write('add_custom_command(\n')
output.write('OUTPUT')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n')
for copy in (file_copy, dir_copy):
for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs):
# 'cmake -E copy src dst' will create the 'dst' directory if needed.
output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command)
output.write(src)
output.write(' ')
output.write(dst)
output.write("\n")
output.write('DEPENDS')
for copy in (file_copy, dir_copy):
if copy.inputs_name:
WriteVariable(output, copy.inputs_name, ' ')
output.write('\n')
output.write('WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write('COMMENT Copying for ')
output.write(target_name)
output.write('\n')
output.write('VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(copy_name)
output.write('\n DEPENDS')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n SOURCES')
if file_copy.inputs_name:
WriteVariable(output, file_copy.inputs_name, ' ')
output.write('\n)\n')
extra_deps.append(copy_name)
def CreateCMakeTargetBaseName(qualified_target):
"""This is the name we would like the target to have."""
_, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_base_name = gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_base_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_base_name)
def CreateCMakeTargetFullName(qualified_target):
"""An unambiguous name for the target."""
gyp_file, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_full_name = gyp_file + ':' + gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_full_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_full_name)
class CMakeNamer(object):
"""Converts Gyp target names into CMake target names.
CMake requires that target names be globally unique. One way to ensure
this is to fully qualify the names of the targets. Unfortunately, this
ends up with all targets looking like "chrome_chrome_gyp_chrome" instead
of just "chrome". If this generator were only interested in building, it
would be possible to fully qualify all target names, then create
unqualified target names which depend on all qualified targets which
should have had that name. This is more or less what the 'make' generator
does with aliases. However, one goal of this generator is to create CMake
files for use with IDEs, and fully qualified names are not as user
friendly.
Since target name collision is rare, we do the above only when required.
Toolset variants are always qualified from the base, as this is required for
building. However, it also makes sense for an IDE, as it is possible for
defines to be different.
"""
def __init__(self, target_list):
self.cmake_target_base_names_conflicting = set()
cmake_target_base_names_seen = set()
for qualified_target in target_list:
cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target)
if cmake_target_base_name not in cmake_target_base_names_seen:
cmake_target_base_names_seen.add(cmake_target_base_name)
else:
self.cmake_target_base_names_conflicting.add(cmake_target_base_name)
def CreateCMakeTargetName(self, qualified_target):
base_name = CreateCMakeTargetBaseName(qualified_target)
if base_name in self.cmake_target_base_names_conflicting:
return CreateCMakeTargetFullName(qualified_target)
return base_name
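# Illustrative behavior sketch (not part of the original module), using
# hypothetical qualified targets of the form 'path/to/file.gyp:target#toolset':
# only the colliding base name 'common' is expanded to its full form.
#
#   namer = CMakeNamer(['a/a.gyp:common#target', 'b/b.gyp:common#target',
#                       'a/a.gyp:app#target'])
#   namer.CreateCMakeTargetName('a/a.gyp:app#target')     # -> 'app'
#   namer.CreateCMakeTargetName('a/a.gyp:common#target')  # -> 'a_a_gyp_common'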
def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output):
# The make generator does this always.
# TODO: It would be nice to be able to tell CMake all dependencies.
circular_libs = generator_flags.get('circular', True)
if not generator_flags.get('standalone', False):
output.write('\n#')
output.write(qualified_target)
output.write('\n')
gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir)
rel_gyp_dir = os.path.dirname(rel_gyp_file)
# Relative path from build dir to top dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir)
# Relative path from build dir to gyp dir.
build_to_gyp = os.path.join(build_to_top, rel_gyp_dir)
path_from_cmakelists_to_gyp = build_to_gyp
spec = target_dicts.get(qualified_target, {})
config = spec.get('configurations', {}).get(config_to_use, {})
target_name = spec.get('target_name', '<missing target name>')
target_type = spec.get('type', '<missing target type>')
target_toolset = spec.get('toolset')
cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type)
if cmake_target_type is None:
print ('Target %s has unknown target type %s, skipping.' %
(target_name, target_type))
return
SetVariable(output, 'TARGET', target_name)
SetVariable(output, 'TOOLSET', target_toolset)
cmake_target_name = namer.CreateCMakeTargetName(qualified_target)
extra_sources = []
extra_deps = []
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Rules must be early like actions.
if 'rules' in spec:
WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Copies
if 'copies' in spec:
WriteCopies(cmake_target_name, spec['copies'], extra_deps,
path_from_cmakelists_to_gyp, output)
# Target and sources
srcs = spec.get('sources', [])
# Gyp separates the sheep from the goats based on file extensions.
# A full separation is done here because of flag handling (see below).
s_sources = []
c_sources = []
cxx_sources = []
linkable_sources = []
other_sources = []
for src in srcs:
_, ext = os.path.splitext(src)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
src_norm_path = NormjoinPath(path_from_cmakelists_to_gyp, src)
if src_type == 's':
s_sources.append(src_norm_path)
elif src_type == 'cc':
c_sources.append(src_norm_path)
elif src_type == 'cxx':
cxx_sources.append(src_norm_path)
elif Linkable(ext):
linkable_sources.append(src_norm_path)
else:
other_sources.append(src_norm_path)
for extra_source in extra_sources:
src, real_source = extra_source
_, ext = os.path.splitext(real_source)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
if src_type == 's':
s_sources.append(src)
elif src_type == 'cc':
c_sources.append(src)
elif src_type == 'cxx':
cxx_sources.append(src)
elif Linkable(ext):
linkable_sources.append(src)
else:
other_sources.append(src)
s_sources_name = None
if s_sources:
s_sources_name = cmake_target_name + '__asm_srcs'
SetVariableList(output, s_sources_name, s_sources)
c_sources_name = None
if c_sources:
c_sources_name = cmake_target_name + '__c_srcs'
SetVariableList(output, c_sources_name, c_sources)
cxx_sources_name = None
if cxx_sources:
cxx_sources_name = cmake_target_name + '__cxx_srcs'
SetVariableList(output, cxx_sources_name, cxx_sources)
linkable_sources_name = None
if linkable_sources:
linkable_sources_name = cmake_target_name + '__linkable_srcs'
SetVariableList(output, linkable_sources_name, linkable_sources)
other_sources_name = None
if other_sources:
other_sources_name = cmake_target_name + '__other_srcs'
SetVariableList(output, other_sources_name, other_sources)
# CMake gets upset when executable targets provide no sources.
# http://www.cmake.org/pipermail/cmake/2010-July/038461.html
dummy_sources_name = None
has_sources = (s_sources_name or
c_sources_name or
cxx_sources_name or
linkable_sources_name or
other_sources_name)
if target_type == 'executable' and not has_sources:
dummy_sources_name = cmake_target_name + '__dummy_srcs'
SetVariable(output, dummy_sources_name,
"${obj}.${TOOLSET}/${TARGET}/genc/dummy.c")
output.write('if(NOT EXISTS "')
WriteVariable(output, dummy_sources_name)
output.write('")\n')
output.write(' file(WRITE "')
WriteVariable(output, dummy_sources_name)
output.write('" "")\n')
output.write("endif()\n")
# CMake is opposed to setting linker directories and considers the practice
# of setting linker directories dangerous. Instead, it favors the use of
# find_library and passing absolute paths to target_link_libraries.
# However, CMake does provide the command link_directories, which adds
# link directories to targets defined after it is called.
# As a result, link_directories must come before the target definition.
# CMake unfortunately has no means of removing entries from LINK_DIRECTORIES.
library_dirs = config.get('library_dirs')
if library_dirs is not None:
output.write('link_directories(')
for library_dir in library_dirs:
output.write(' ')
output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir))
output.write('\n')
output.write(')\n')
output.write(cmake_target_type.command)
output.write('(')
output.write(cmake_target_name)
if cmake_target_type.modifier is not None:
output.write(' ')
output.write(cmake_target_type.modifier)
if s_sources_name:
WriteVariable(output, s_sources_name, ' ')
if c_sources_name:
WriteVariable(output, c_sources_name, ' ')
if cxx_sources_name:
WriteVariable(output, cxx_sources_name, ' ')
if linkable_sources_name:
WriteVariable(output, linkable_sources_name, ' ')
if other_sources_name:
WriteVariable(output, other_sources_name, ' ')
if dummy_sources_name:
WriteVariable(output, dummy_sources_name, ' ')
output.write(')\n')
# Let CMake know if the 'all' target should depend on this target.
exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets
else 'FALSE')
SetTargetProperty(output, cmake_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
for extra_target_name in extra_deps:
SetTargetProperty(output, extra_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
# Output name and location.
if target_type != 'none':
# Link as 'C' if there are no other files
if not c_sources and not cxx_sources:
SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C'])
# Mark uncompiled sources as uncompiled.
if other_sources_name:
output.write('set_source_files_properties(')
WriteVariable(output, other_sources_name, '')
output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n')
# Mark object sources as linkable.
if linkable_sources_name:
output.write('set_source_files_properties(')
WriteVariable(output, linkable_sources_name, '')
output.write(' PROPERTIES EXTERNAL_OBJECT "TRUE")\n')
# Output directory
target_output_directory = spec.get('product_dir')
if target_output_directory is None:
if target_type in ('executable', 'loadable_module'):
target_output_directory = generator_default_variables['PRODUCT_DIR']
elif target_type == 'shared_library':
target_output_directory = '${builddir}/lib.${TOOLSET}'
elif spec.get('standalone_static_library', False):
target_output_directory = generator_default_variables['PRODUCT_DIR']
else:
base_path = gyp.common.RelativePath(os.path.dirname(gyp_file),
options.toplevel_dir)
target_output_directory = '${obj}.${TOOLSET}'
target_output_directory = (
os.path.join(target_output_directory, base_path))
cmake_target_output_directory = NormjoinPathForceCMakeSource(
path_from_cmakelists_to_gyp,
target_output_directory)
SetTargetProperty(output,
cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY',
cmake_target_output_directory)
# Output name
default_product_prefix = ''
default_product_name = target_name
default_product_ext = ''
if target_type == 'static_library':
static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
static_library_prefix)
default_product_prefix = static_library_prefix
default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX']
elif target_type in ('loadable_module', 'shared_library'):
shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
shared_library_prefix)
default_product_prefix = shared_library_prefix
default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX']
elif target_type != 'executable':
print ('ERROR: What output file should be generated?',
'type', target_type, 'target', target_name)
product_prefix = spec.get('product_prefix', default_product_prefix)
product_name = spec.get('product_name', default_product_name)
product_ext = spec.get('product_extension')
if product_ext:
product_ext = '.' + product_ext
else:
product_ext = default_product_ext
SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix)
SetTargetProperty(output, cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_NAME',
product_name)
SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext)
# Make the output of this target referenceable as a source.
cmake_target_output_basename = product_prefix + product_name + product_ext
cmake_target_output = os.path.join(cmake_target_output_directory,
cmake_target_output_basename)
SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '')
# Includes
includes = config.get('include_dirs')
if includes:
# This (target include directories) is what requires CMake 2.8.8
includes_name = cmake_target_name + '__include_dirs'
SetVariableList(output, includes_name,
[NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include)
for include in includes])
output.write('set_property(TARGET ')
output.write(cmake_target_name)
output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ')
WriteVariable(output, includes_name, '')
output.write(')\n')
# Defines
defines = config.get('defines')
if defines is not None:
SetTargetProperty(output,
cmake_target_name,
'COMPILE_DEFINITIONS',
defines,
';')
# Compile Flags - http://www.cmake.org/Bug/view.php?id=6493
# CMake currently does not have target C and CXX flags.
# So, instead of doing...
# cflags_c = config.get('cflags_c')
# if cflags_c is not None:
# SetTargetProperty(output, cmake_target_name,
# 'C_COMPILE_FLAGS', cflags_c, ' ')
# cflags_cc = config.get('cflags_cc')
# if cflags_cc is not None:
# SetTargetProperty(output, cmake_target_name,
# 'CXX_COMPILE_FLAGS', cflags_cc, ' ')
# Instead we must...
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cxx = config.get('cflags_cc', [])
if (not cflags_c or not c_sources) and (not cflags_cxx or not cxx_sources):
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', cflags, ' ')
elif c_sources and not (s_sources or cxx_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
elif cxx_sources and not (s_sources or c_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
else:
# TODO: This is broken, one cannot generally set properties on files,
# as other targets may require different properties on the same files.
if s_sources and cflags:
SetFilesProperty(output, s_sources_name, 'COMPILE_FLAGS', cflags, ' ')
if c_sources and (cflags or cflags_c):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetFilesProperty(output, c_sources_name, 'COMPILE_FLAGS', flags, ' ')
if cxx_sources and (cflags or cflags_cxx):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetFilesProperty(output, cxx_sources_name, 'COMPILE_FLAGS', flags, ' ')
# Linker flags
ldflags = config.get('ldflags')
if ldflags is not None:
SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ')
# Note on Dependencies and Libraries:
# CMake wants to handle link order, resolving the link line up front.
# Gyp does not retain or enforce specifying enough information to do so.
# So do as other gyp generators and use --start-group and --end-group.
# Give CMake as little information as possible so that it doesn't mess it up.
# Dependencies
rawDeps = spec.get('dependencies', [])
static_deps = []
shared_deps = []
other_deps = []
for rawDep in rawDeps:
dep_cmake_name = namer.CreateCMakeTargetName(rawDep)
dep_spec = target_dicts.get(rawDep, {})
dep_target_type = dep_spec.get('type', None)
if dep_target_type == 'static_library':
static_deps.append(dep_cmake_name)
elif dep_target_type == 'shared_library':
shared_deps.append(dep_cmake_name)
else:
other_deps.append(dep_cmake_name)
# ensure all external dependencies are complete before internal dependencies
# extra_deps currently only depend on their own deps, so otherwise run early
if static_deps or shared_deps or other_deps:
for extra_dep in extra_deps:
output.write('add_dependencies(')
output.write(extra_dep)
output.write('\n')
for deps in (static_deps, shared_deps, other_deps):
for dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(dep)
output.write('\n')
output.write(')\n')
linkable = target_type in ('executable', 'loadable_module', 'shared_library')
other_deps.extend(extra_deps)
if other_deps or (not linkable and (static_deps or shared_deps)):
output.write('add_dependencies(')
output.write(cmake_target_name)
output.write('\n')
for dep in gyp.common.uniquer(other_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if not linkable:
for deps in (static_deps, shared_deps):
for lib_dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(lib_dep)
output.write('\n')
output.write(')\n')
# Libraries
if linkable:
external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0]
if external_libs or static_deps or shared_deps:
output.write('target_link_libraries(')
output.write(cmake_target_name)
output.write('\n')
if static_deps:
write_group = circular_libs and len(static_deps) > 1
if write_group:
output.write('-Wl,--start-group\n')
for dep in gyp.common.uniquer(static_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if write_group:
output.write('-Wl,--end-group\n')
if shared_deps:
for dep in gyp.common.uniquer(shared_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if external_libs:
for lib in gyp.common.uniquer(external_libs):
output.write(' ')
output.write(lib)
output.write('\n')
output.write(')\n')
UnsetVariable(output, 'TOOLSET')
UnsetVariable(output, 'TARGET')
def GenerateOutputForConfig(target_list, target_dicts, data,
params, config_to_use):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier; cmake doesn't put anything here.
# Each Gyp configuration creates a different CMakeLists.txt file
# to avoid incompatibilities between Gyp and CMake configurations.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_to_use))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
output_file = os.path.join(toplevel_build, 'CMakeLists.txt')
gyp.common.EnsureDirExists(output_file)
output = open(output_file, 'w')
output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
output.write('cmake_policy(VERSION 2.8.8)\n')
gyp_file, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1])
output.write('project(')
output.write(project_target)
output.write(')\n')
SetVariable(output, 'configuration', config_to_use)
ar = None
cc = None
cxx = None
make_global_settings = data[gyp_file].get('make_global_settings', [])
build_to_top = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_top, value)
if key == 'CC':
cc = os.path.join(build_to_top, value)
if key == 'CXX':
cxx = os.path.join(build_to_top, value)
ar = gyp.common.GetEnvironFallback(['AR_target', 'AR'], ar)
cc = gyp.common.GetEnvironFallback(['CC_target', 'CC'], cc)
cxx = gyp.common.GetEnvironFallback(['CXX_target', 'CXX'], cxx)
if ar:
SetVariable(output, 'CMAKE_AR', ar)
if cc:
SetVariable(output, 'CMAKE_C_COMPILER', cc)
if cxx:
SetVariable(output, 'CMAKE_CXX_COMPILER', cxx)
# The following appears to be as-yet undocumented.
# http://public.kitware.com/Bug/view.php?id=8392
output.write('enable_language(ASM)\n')
# ASM-ATT does not support .S files.
# output.write('enable_language(ASM-ATT)\n')
if cc:
SetVariable(output, 'CMAKE_ASM_COMPILER', cc)
SetVariable(output, 'builddir', '${CMAKE_CURRENT_BINARY_DIR}')
SetVariable(output, 'obj', '${builddir}/obj')
output.write('\n')
# TODO: Undocumented/unsupported (the CMake Java generator depends on it).
# CMake by default names the object resulting from foo.c to be foo.c.o.
# Gyp traditionally names the object resulting from foo.c foo.o.
# This should be irrelevant, but some targets extract .o files from .a
# and depend on the name of the extracted .o files.
output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('\n')
# Force ninja to use rsp files. Otherwise link and ar lines can get too long,
# resulting in 'Argument list too long' errors.
output.write('set(CMAKE_NINJA_FORCE_RESPONSE_FILE 1)\n')
output.write('\n')
namer = CMakeNamer(target_list)
# The list of targets upon which the 'all' target should depend.
# CMake has its own implicit 'all' target; one is not created explicitly.
all_qualified_targets = set()
for build_file in params['build_files']:
for qualified_target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_qualified_targets.add(qualified_target)
for qualified_target in target_list:
WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output)
output.close()
def PerformBuild(data, configurations, params):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier; cmake doesn't put anything here.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
for config_name in configurations:
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_name))
arguments = ['cmake', '-G', 'Ninja']
print 'Generating [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments, cwd=build_dir)
arguments = ['ninja', '-C', build_dir]
print 'Building [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
target_list, target_dicts, data, params, config_name = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data,
params, user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append((target_list, target_dicts, data,
params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data,
params, config_name)
|
mit
|
google/joint_vae
|
datasets/mnist_attributes/perturb_images.py
|
1
|
3457
|
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to add perturbations to MNIST images.
The perturbations we add to the MNIST dataset include rotation,
scaling and translation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from PIL import Image
import numpy as np
import utils
def rotate_image(input_image, rotation_angle, interpolation=Image.BICUBIC):
"""Rotate a PIL Image and return the output image and size."""
out_image = input_image.rotate(rotation_angle, interpolation, expand=True)
image_size = utils.XY(*out_image.size)
return out_image, image_size
def scale_image(input_image, image_size, scale, interpolation=Image.BICUBIC):
"""Scale a PIL Image and return the output image and size."""
input_image.thumbnail((int(image_size.x * scale), int(image_size.y * scale)),
interpolation)
image_size = utils.XY(*input_image.size)
return input_image, image_size
def paste_image_on_black_canvas(image, location, canvas_size=64):
"""Paste input image at given location on a black canvas."""
canvas = Image.fromarray(np.zeros((canvas_size, canvas_size, 3)).astype(np.uint8))
canvas.paste(image, (location.x, location.y))
return canvas
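# Illustrative usage sketch (not part of the original module): rotate, scale
# and paste a dummy RGB image onto the 64x64 black canvas. utils.XY is assumed
# to be an (x, y) pair type, as suggested by its use above.
#
#   dummy = Image.fromarray(np.zeros((28, 28, 3), dtype=np.uint8))
#   rotated, size = rotate_image(dummy, 45)
#   scaled, size = scale_image(rotated, size, 0.75)
#   canvas = paste_image_on_black_canvas(scaled, utils.XY(x=10, y=10))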
class PerturbImages(object):
"""Intitializes the perturb images class."""
def __init__(self, valid_transformations, image_size=28, canvas_size=64):
"""Intializes the perturbations for images."""
self._valid_transformations = valid_transformations
self._interpolation_type = Image.BICUBIC
self._image_size = image_size
self._canvas_size = canvas_size
self._initialize_canvas()
def _initialize_canvas(self):
"""Initializes the canvas image on which we overlay a digit."""
self._canvas = Image.fromarray(
np.zeros((self._canvas_size, self._canvas_size, 3)).astype(np.uint8))
def bind_image(self, image):
"""Binds an mnist image to the canvas.
We apply transformations to the binded image and then paste it
on the canvas.
"""
piece = np.repeat(image, 3, axis=-1)
piece = Image.fromarray(piece)
self._piece = piece
def transform_image(self, rotation, scaling, location):
# Perform transformations in the following order:
# First do rotation, if specified.
# Then do scaling, if specified.
# Finally paste the image at the specified location.
if scaling > 1.0:
raise ValueError("Maximum allowed scale is 1.0.")
if "rotate" in self._valid_transformations:
self._piece = self._piece.rotate(
rotation, self._interpolation_type, expand=False)
if "scaling" in self._valid_transformations:
self._piece.thumbnail((int(self._image_size * scaling), int(
self._image_size * scaling)), self._interpolation_type)
if "location" in self._valid_transformations:
self._canvas.paste(self._piece, (location.x, location.y))
return self._canvas
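# Illustrative usage sketch (not part of the original module): perturb a
# single-channel 28x28 digit. The (28, 28, 1) input shape and utils.XY are
# assumptions based on bind_image's np.repeat along the last axis above.
#
#   perturber = PerturbImages(["rotate", "scaling", "location"])
#   perturber.bind_image(np.zeros((28, 28, 1), dtype=np.uint8))
#   canvas = perturber.transform_image(rotation=30, scaling=0.8,
#                                      location=utils.XY(x=18, y=18))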
|
apache-2.0
|
heracek/django-nonrel
|
tests/regressiontests/admin_validation/models.py
|
43
|
1108
|
"""
Tests of ModelAdmin validation logic.
"""
from django.db import models
class Album(models.Model):
title = models.CharField(max_length=150)
class Song(models.Model):
title = models.CharField(max_length=150)
album = models.ForeignKey(Album)
original_release = models.DateField(editable=False)
class Meta:
ordering = ('title',)
def __unicode__(self):
return self.title
def readonly_method_on_model(self):
# does nothing
pass
class TwoAlbumFKAndAnE(models.Model):
album1 = models.ForeignKey(Album, related_name="album1_set")
album2 = models.ForeignKey(Album, related_name="album2_set")
e = models.CharField(max_length=1)
class Author(models.Model):
name = models.CharField(max_length=100)
class Book(models.Model):
name = models.CharField(max_length=100)
subtitle = models.CharField(max_length=100)
price = models.FloatField()
authors = models.ManyToManyField(Author, through='AuthorsBooks')
class AuthorsBooks(models.Model):
author = models.ForeignKey(Author)
book = models.ForeignKey(Book)
|
bsd-3-clause
|
Midnighter/pymetabolism
|
pymetabolism/metabolism/nullmodels.py
|
1
|
9566
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=====================
Metabolic Null Models
=====================
:Authors:
Moritz Emanuel Beber
:Date:
2011-07-01
:Copyright:
Copyright(c) 2011 Jacobs University of Bremen. All rights reserved.
:File:
nullmodels.py
"""
import logging
import itertools
from operator import itemgetter
from ..errors import PyMetabolismError
from .. import miscellaneous as misc
from ..fba import FBAModel
logger = logging.getLogger(__name__)
logger.addHandler(misc.NullHandler())
#def bfs_balance(network, coefficients, seed=None):
# """
# Build a consistent stoichiometric matrix from a directed bipartite graph
# object.
#
# Requires a distribution of stoichiometric coefficients that will be
# randomly chosen to construct mass-balanced reactions. Note that the
# stoichiometric coefficients can be biased by inserting certain numbers
# multiple times.
#
# Basically, this function performs a breadth-first search on the network
# assigning stoichiometric coefficients to reactions whenever possible and
# otherwise reconciles compound masses.
#
# Parameters
# ----------
# network: MetabolicNetwork
# A bipartite MetabolicNetwork with a desired architecture.
# coefficients: iterable
# An iterable of stoichiometric coefficients that may be weighted by
# including certain coefficients multiple times.
# seed: int (optional)
# A specific seed for the random number generator for reproducible runs.
# """
# rand_int = numpy.random.random_integers
# if seed:
# numpy.random.seed(int(seed))
# # a map that stores exact masses of metabolites, thus decimal type
# mass = defaultdict(dec.Decimal) # zero initialised decimal values
# # stoichiometric matrix
# stoichiometry = numpy.zeros(shape=(len(network.compounds),
# len(network.reactions)), dtype=int)
# # breadth-first search containers
# disc = Queue() # queue for discovered nodes
# nbunch = list(network.compounds)
# # initialise search
# elem = nbunch[rand_int(0, len(nbunch) - 1)]
# mass[elem] = Decimal(1)
# disc.append(elem)
# while True:
# elem = disc.pop()
# if mass[elem] > 0:
# continue
# for node in graph.successors_iter(elem):
# disc.append(node)
# # do some consistency checks for example all masses > 0 and
# # each reaction must satisfy mass substrates = mass products
#def lp_balance(network, coefficients, solver="gurobi", seed=None):
# """
# Build a consistent stoichiometric matrix from a directed bipartite graph
# object.
#
# Requires a distribution of stoichiometric coefficients that will be
# randomly chosen to construct mass-balanced reactions. Note that the
# stoichiometric coefficients can be biased by inserting certain numbers
# multiple times.
#
# Mass-balance is achieved by solving the following linear programming
# problem:
# Minimise the sum over all stoichiometric coefficients subject to the
# constraints given by the transpose of the stoichiometric matrix
# multiplied with the mass vector equals the zero vector.
#
# Parameters
# ----------
# network: MetabolicNetwork
# A bipartite MetabolicNetwork with a desired architecture.
# coefficients: iterable
# An iterable of stoichiometric coefficients that may be weighted by
# including certain coefficients multiple times.
# seed: int (optional)
# A specific seed for the random number generator for reproducible runs.
# """
# if seed:
# numpy.random.seed(int(seed))
# if seed:
# rnd.seed(seed)
# # a map that stores exact masses of metabolites, thus decimal type
# mass = dict()
# map with metabolite matrix-indices
# metb_idx = dict()
# for (i, metb) in enumerate(metbs):
# metb_idx[metb] = i
# mass[metb] = rnd.random()
## # map with reaction rates
## rates = dict()
# map with reaction matrix-indices
# rxn_idx = dict()
# for (i, rxn) in enumerate(rxns):
# rxn_idx[rxn] = i
## rates[rxn] = rnd.random() # not required atm
# m = len(metbs)
# n = len(rxns)
# # problem: minimise sum over stoichiometric coefficients
# f = np.ones([n, m], dtype=float)
# # subject to T(S).m = 0
# a_eq = np.empty([n, m], dtype=float)
# for rxn in rxns:
# for metb in metbs:
# a_eq[rxn_idx[rxn], metb_idx[metb]] = mass[metb]
# b_eq = np.zeros(n, dtype=float)
# # where
# lb = np.zeros([n, m], dtype=float)
# for rxn in rxns:
# for metb in graph.predecessors_iter(rxn):
# lb[rxn_idx[rxn], metb_idx[metb]] = -100.
# for metb in graph.successors_iter(rxn):
# lb[rxn_idx[rxn], metb_idx[metb]] = 1.
# ub = np.zeros([n, m], dtype=float)
# for rxn in rxns:
# for metb in graph.predecessors_iter(rxn):
# lb[rxn_idx[rxn], metb_idx[metb]] = -1.
# for metb in graph.successors_iter(rxn):
# lb[rxn_idx[rxn], metb_idx[metb]] = 100.
# # solve
# p = oo.LP(f=f, A=None, Aeq=a_eq, b=None, beq=b_eq, lb=lb, ub=ub)
# #p.debug = 1
# result = p.solve('cvxopt_lp')
# print result.ff
# print result.xf
def make_consistent_stoichiometry(network, coefficients, mass_vector=None):
"""
Based on a given network architecture this function attempts to generate a
consistent stoichiometry that obeys mass conservation laws.
Parameters
----------
network: MetabolicNetwork
coefficients: indexable
mass_vector: dict
"""
def balance_reaction_by_mass(reaction):
"""
Balance a single reaction by adjusting the stoichiometric coefficients in a
way that leads to mass conservation.
"""
compounds = [cmpd for cmpd in itertools.chain(network.pred[reaction],
network.succ[reaction])]
# modify the coefficients for the current reaction
temp_coeff = list()
# substrates
msg = list()
for cmpd in network.pred[reaction]:
temp_coeff.append((cmpd, -mass_vector[cmpd]))
msg.append("- %.3f %s" % (mass_vector[cmpd], str(cmpd)))
#products
for cmpd in network.succ[reaction]:
temp_coeff.append((cmpd, mass_vector[cmpd]))
msg.append("+ %.3f %s" % (mass_vector[cmpd], str(cmpd)))
msg.append("= 0")
logger.debug("%s:", reaction.name)
logger.debug("%s", " ".join(msg))
model.modify_reaction_bounds(compounds, lb=1.0)
model.modify_compound_coefficients("reaction", temp_coeff)
model.set_objective_reaction(compounds, 1.0)
model.fba(maximize=False)
msg = list()
try:
for cmpd in network.pred[reaction]:
# we asked for integers
network[cmpd][reaction]["coefficient"] = round(model.iter_flux(cmpd))
msg.append("- %.3f %.3f" % (mass_vector[cmpd], model.iter_flux(cmpd)))
for cmpd in network.succ[reaction]:
network[reaction][cmpd]["coefficient"] = round(model.iter_flux(cmpd))
msg.append("+ %.3f %.3f" % (mass_vector[cmpd], model.iter_flux(cmpd)))
msg.append("= 0")
except PyMetabolismError:
logger.debug("psssst:", exc_info=True)
raise PyMetabolismError("Reaction '%s' cannot be balanced with the"\
" given mass vector.", str(reaction))
logger.debug("%s:", reaction.name)
logger.debug("%s", " ".join(msg))
# reset bounds
model.modify_reaction_bounds(compounds, lb=0.0)
# abuse free_compound to reset all coefficients
model.free_compound("reaction")
if not mass_vector:
# the default masses for compounds:
# * compounds are sorted by degree and each one is assigned the degree found
# at the mirrored position of that sorted list, so low-degree compounds get
# large masses (similar to an inverse of the degree, but with integer values);
# see the hedged sketch appended at the end of this module
compound = itemgetter(0)
degree = itemgetter(1)
masses = [pair for pair in network.degree_iter(network.compounds)]
masses.sort(key=degree)
end = len(masses) - 1
# generate mass vector for compounds
mass_vector = dict((compound(pair), degree(masses[end - i]))\
for (i, pair) in enumerate(masses))
# prepare a single LP model for all reactions
model = FBAModel("mass balance")
for cmpd in network.compounds:
cmpd.reversible = False
model.add_reaction(network.compounds, [("reaction", 0.0)], lb=0.0,
ub=max(coefficients))
# test different objective functions:
# * only zeros leads to fast solutions (no objective function) that are
# close to the upper boundary, this can be ameliorated by picking starting
# points for the variables from the given distribution of coefficients
# * all equal leads to slower solutions that are mostly one with a few
# variations to balance the reactions
# * an objective function with entries of 1 / factor leads to very long
# solution times but much more varied coefficients
# integer variables because traditionally biochemical reactions are
# multiples of chemical groups, i.e., that's what will be in the
# coefficients
model._make_integer(network.compounds)
total = float(len(network.reactions))
for (i, rxn) in enumerate(network.reactions):
balance_reaction_by_mass(rxn)
logger.info("%.2f %% complete.", float(i + 1) / total * 100.)
|
bsd-3-clause
|
lucychambers/pepperrage
|
.bundle/ruby/2.0.0/gems/pygments.rb-0.6.0/vendor/pygments-main/external/moin-parser.py
|
42
|
3600
|
# -*- coding: utf-8 -*-
"""
The Pygments MoinMoin Parser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a MoinMoin parser plugin that renders source code to HTML via
Pygments; you need Pygments 0.7 or newer for this parser to work.
To use it, set the options below to match your setup and put this file in
the data/plugin/parser subdirectory of your Moin instance, and give it the
name that the parser directive should have. For example, if you name the
file ``code.py``, you can get a highlighted Python code sample with this
Wiki markup::
{{{
#!code python
[...]
}}}
Additionally, if you set ATTACHMENTS below to True, Pygments will also be
called for all attachments for whose filenames there is no other parser
registered.
You are responsible for including CSS rules that will map the Pygments CSS
classes to colors. You can output a stylesheet file with `pygmentize`, put
it into the `htdocs` directory of your Moin instance and then include it in
the `stylesheets` configuration option in the Moin config, e.g.::
stylesheets = [('screen', '/htdocs/pygments.css')]
If you do not want to do that and are willing to accept larger HTML
output, you can set the INLINESTYLES option below to True.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Options
# ~~~~~~~
# Set to True if you want to highlight attachments, in addition to
# {{{ }}} blocks.
ATTACHMENTS = True
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
import sys
from pygments import highlight
from pygments.lexers import get_lexer_by_name, get_lexer_for_filename, TextLexer
from pygments.formatters import HtmlFormatter
from pygments.util import ClassNotFound
# wrap lines in <span>s so that the Moin-generated line numbers work
class MoinHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
for line in source:
yield 1, '<span class="line">' + line[1] + '</span>'
htmlformatter = MoinHtmlFormatter(noclasses=INLINESTYLES)
textlexer = TextLexer()
codeid = [0]
class Parser:
"""
MoinMoin Pygments parser.
"""
if ATTACHMENTS:
extensions = '*'
else:
extensions = []
Dependencies = []
def __init__(self, raw, request, **kw):
self.raw = raw
self.req = request
if "format_args" in kw:
# called from a {{{ }}} block
try:
self.lexer = get_lexer_by_name(kw['format_args'].strip())
except ClassNotFound:
self.lexer = textlexer
return
if "filename" in kw:
# called for an attachment
filename = kw['filename']
else:
# called for an attachment by an older moin
# HACK: find out the filename by peeking into the execution
# frame which might not always work
try:
frame = sys._getframe(1)
filename = frame.f_locals['filename']
except:
filename = 'x.txt'
try:
self.lexer = get_lexer_for_filename(filename)
except ClassNotFound:
self.lexer = textlexer
def format(self, formatter):
codeid[0] += 1
id = "pygments_%s" % codeid[0]
w = self.req.write
w(formatter.code_area(1, id, start=1, step=1))
w(formatter.rawHTML(highlight(self.raw, self.lexer, htmlformatter)))
w(formatter.code_area(0, id))
|
mit
|
SiCKRAGETV/SickRage
|
sickrage/core/databases/__init__.py
|
2
|
16768
|
# Author: echel0n <[email protected]>
# URL: https://sickrage.ca
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import datetime
import io
import os
import pickle
import re
import shutil
import tarfile
import time
import traceback
from sqlite3 import OperationalError
from CodernityDB.database import RecordDeleted, RecordNotFound
from CodernityDB.database_super_thread_safe import SuperThreadSafeDatabase
from CodernityDB.index import IndexNotFoundException, IndexConflict, IndexException
from CodernityDB.storage import IU_Storage
import sickrage
from sickrage.core.helpers import randomString
def Custom_IU_Storage_get(self, start, size, status='c'):
if status == 'd':
return None
else:
self._f.seek(start)
return self.data_from(self._f.read(size))
class srDatabase(object):
_indexes = {}
_migrate_list = {}
def __init__(self, name=''):
self.name = name
self.old_db_path = ''
self.db_path = os.path.join(sickrage.app.data_dir, 'database', self.name)
self.db = SuperThreadSafeDatabase(self.db_path)
def initialize(self):
# Remove the database folder if both the new database and the old db file exist
if self.db.exists() and os.path.isfile(self.old_db_path):
if not self.opened:
self.db.open()
self.db.destroy()
if self.db.exists():
# self.backup()
if not self.opened:
self.db.open()
else:
self.db.create()
# setup database indexes
self.setup_indexes()
def old_backup(self):
# Backup before start and cleanup old backups
backup_path = os.path.join(sickrage.app.data_dir, 'db_backup', self.name)
backup_count = 5
existing_backups = []
if not os.path.isdir(backup_path):
os.makedirs(backup_path)
for root, dirs, files in os.walk(backup_path):
# Only consider files being a direct child of the backup_path
if root == backup_path:
for backup_file in sorted(files):
ints = re.findall('\d+', backup_file)
# Delete non zip files
if len(ints) != 1:
try:
os.remove(os.path.join(root, backup_file))
except:
pass
else:
existing_backups.append((int(ints[0]), backup_file))
else:
# Delete stray directories.
shutil.rmtree(root)
# Remove all but the last 5
for eb in existing_backups[:-backup_count]:
os.remove(os.path.join(backup_path, eb[1]))
# Create new backup
new_backup = os.path.join(backup_path, '%s.tar.gz' % int(time.time()))
with tarfile.open(new_backup, 'w:gz') as zipf:
for root, dirs, files in os.walk(self.db_path):
for zfilename in files:
zipf.add(os.path.join(root, zfilename),
arcname='database/%s/%s' % (
self.name,
os.path.join(root[len(self.db_path) + 1:], zfilename))
)
def compact(self, try_repair=True, **kwargs):
# Removing left over compact files
for f in os.listdir(self.db.path):
for x in ['_compact_buck', '_compact_stor']:
if f[-len(x):] == x:
os.unlink(os.path.join(self.db.path, f))
try:
start = time.time()
size = float(self.db.get_db_details().get('size', 0))
sickrage.app.log.info(
'Compacting {} database, current size: {}MB'.format(self.name, round(size / 1048576, 2)))
self.db.compact()
new_size = float(self.db.get_db_details().get('size', 0))
sickrage.app.log.info(
'Done compacting {} database in {}s, new size: {}MB, saved: {}MB'.format(
self.name, round(time.time() - start, 2),
round(new_size / 1048576, 2), round((size - new_size) / 1048576, 2))
)
except (IndexException, AttributeError, TypeError) as e:
if try_repair:
sickrage.app.log.debug('Something wrong with indexes, trying repair')
# Remove all indexes
old_indexes = self._indexes.keys()
for index_name in old_indexes:
try:
self.db.destroy_index(index_name)
except IndexNotFoundException:
pass
except:
sickrage.app.log.debug('Failed removing old index %s', index_name)
# Add them again
for index_name in self._indexes:
try:
self.db.add_index(self._indexes[index_name](self.db.path, index_name))
self.db.reindex_index(index_name)
except IndexConflict:
pass
except:
sickrage.app.log.debug('Failed adding index %s', index_name)
raise
self.compact(try_repair=False)
else:
sickrage.app.log.debug('Failed compact: {}'.format(traceback.format_exc()))
except:
sickrage.app.log.debug('Failed compact: {}'.format(traceback.format_exc()))
def setup_indexes(self):
# setup database indexes
for index_name in self._indexes:
try:
# Make sure store and bucket don't exist
exists = []
for x in ['buck', 'stor']:
full_path = os.path.join(self.db.path, '%s_%s' % (index_name, x))
if os.path.exists(full_path):
exists.append(full_path)
if index_name not in self.db.indexes_names:
# Remove existing buckets if index isn't there
for x in exists:
os.unlink(x)
self.db.add_index(self._indexes[index_name](self.db.path, index_name))
self.db.reindex_index(index_name)
else:
# Previous info
previous_version = self.db.indexes_names[index_name]._version
current_version = self._indexes[index_name]._version
self.check_versions(index_name, current_version, previous_version)
except:
sickrage.app.log.debug('Failed adding index {}'.format(index_name))
def check_versions(self, index_name, current_version, previous_version):
# Only edit index if versions are different
if previous_version < current_version:
self.db.destroy_index(self.db.indexes_names[index_name])
self.db.add_index(self._indexes[index_name](self.db.path, index_name))
self.db.reindex_index(index_name)
def open(self):
self.db.open()
def close(self):
self.db.close()
def upgrade(self):
pass
def cleanup(self):
pass
@property
def version(self):
try:
dbData = list(self.all('version'))[-1]
except IndexError:
dbData = {
'_t': 'version',
'database_version': 1
}
dbData.update(self.insert(dbData))
return dbData['database_version']
@property
def opened(self):
return self.db.opened
def check_integrity(self):
for index_name in self._indexes:
sickrage.app.log.debug('Checking data integrity for index {}'.format(index_name))
data = []
failed = False
# check integrity of index data
for x in self.db.all(index_name):
try:
data += [self.db.get('id', x.get('_id'))]
except Exception:
failed = True
# check if we failed integrity check, if so then destroy index
if failed and index_name in self.db.indexes_names:
self.db.destroy_index(self.db.indexes_names[index_name])
# check if index exists, if not then add it
if index_name not in self.db.indexes_names:
self.db.add_index(self._indexes[index_name](self.db.path, index_name))
# rebuild index if failed
if failed:
sickrage.app.log.debug('Failed data integrity check, rebuilding index {}'.format(index_name))
for x in data:
del x['_id'], x['_rev']
self.insert(x)
# cleanup
del data
def migrate(self):
if os.path.isfile(self.old_db_path):
sickrage.app.log.info('=' * 30)
sickrage.app.log.info('Migrating %s database, please wait...', self.name)
migrate_start = time.time()
import sqlite3
conn = sqlite3.connect(self.old_db_path)
conn.text_factory = lambda x: (x.decode('utf-8', 'ignore'))
migrate_data = {}
rename_old = False
try:
c = conn.cursor()
for ml in self._migrate_list:
migrate_data[ml] = {}
rows = self._migrate_list[ml]
try:
c.execute('SELECT {} FROM `{}`'.format('`' + '`,`'.join(rows) + '`', ml))
except:
# ignore faulty destination_id database
rename_old = True
raise
for p in c.fetchall():
columns = {}
for row in self._migrate_list[ml]:
columns[row] = p[rows.index(row)]
if not migrate_data[ml].get(p[0]):
migrate_data[ml][p[0]] = columns
else:
if not isinstance(migrate_data[ml][p[0]], list):
migrate_data[ml][p[0]] = [migrate_data[ml][p[0]]]
migrate_data[ml][p[0]].append(columns)
sickrage.app.log.info('Getting data took %s', (time.time() - migrate_start))
if not self.db.opened:
return
for t_name in migrate_data:
t_data = migrate_data.get(t_name, {})
sickrage.app.log.info('Importing %s %s' % (len(t_data), t_name))
for k, v in t_data.items():
if isinstance(v, list):
for d in v:
d.update({'_t': t_name})
self.insert(d)
else:
v.update({'_t': t_name})
self.insert(v)
sickrage.app.log.info('Total migration took %s', (time.time() - migrate_start))
sickrage.app.log.info('=' * 30)
rename_old = True
except OperationalError:
sickrage.app.log.debug('Migrating from unsupported/corrupt %s database version', self.name)
rename_old = True
except:
sickrage.app.log.debug('Migration of %s database failed', self.name)
finally:
conn.close()
# rename old database
if rename_old:
random = randomString()
sickrage.app.log.info('Renaming old database to %s.%s_old' % (self.old_db_path, random))
os.rename(self.old_db_path, '{}.{}_old'.format(self.old_db_path, random))
if os.path.isfile(self.old_db_path + '-wal'):
os.rename(self.old_db_path + '-wal', '{}-wal.{}_old'.format(self.old_db_path, random))
if os.path.isfile(self.old_db_path + '-shm'):
os.rename(self.old_db_path + '-shm', '{}-shm.{}_old'.format(self.old_db_path, random))
def delete_corrupted(self, _id, traceback_error=''):
try:
sickrage.app.log.debug('Deleted corrupted document "{}": {}'.format(_id, traceback_error))
corrupted = self.db.get('id', _id, with_storage=False)
self.db._delete_id_index(corrupted.get('_id'), corrupted.get('_rev'), None)
except:
sickrage.app.log.debug('Failed deleting corrupted: {}'.format(traceback.format_exc()))
def all(self, *args, **kwargs):
with_doc = kwargs.pop('with_doc', True)
for data in self.db.all(*args, **kwargs):
if with_doc:
try:
doc = self.db.get('id', data['_id'])
yield doc
except (RecordDeleted, RecordNotFound):
sickrage.app.log.debug('Record not found, skipping: {}'.format(data['_id']))
except (ValueError, EOFError):
self.delete_corrupted(data.get('_id'), traceback_error=traceback.format_exc(0))
else:
yield data
def get_many(self, *args, **kwargs):
with_doc = kwargs.pop('with_doc', True)
for data in self.db.get_many(*args, **kwargs):
if with_doc:
try:
doc = self.db.get('id', data['_id'])
yield doc
except (RecordDeleted, RecordNotFound):
sickrage.app.log.debug('Record not found, skipping: {}'.format(data['_id']))
except (ValueError, EOFError):
self.delete_corrupted(data.get('_id'), traceback_error=traceback.format_exc(0))
else:
yield data
def get(self, *args, **kwargs):
try:
x = self.db.get(with_doc=kwargs.get('with_doc', True), *args, **kwargs)
return x.get('doc', x)
except (RecordDeleted, RecordNotFound):
pass
def delete(self, *args):
return self.db.delete(*args)
def update(self, *args):
return self.db.update(*args)
def insert(self, *args):
return self.db.insert(*args)
def delete_all(self):
for index_name in self.db.indexes_names.keys():
for x in self.all(index_name):
try:
self.delete(x)
except:
continue
def backup(self, backup_file=None):
sickrage.app.log.info('Backing up {} database to {}'.format(self.name, backup_file))
with io.open(backup_file, 'wb') as f:
rows = []
for index_name in self.db.indexes_names.keys():
if index_name in ['id']:
continue
for row in self.all(index_name):
for x in ['_rev', '_id']:
del row[x]
rows += [row]
pickle.dump(rows, f)
del rows
return backup_file
def restore(self, restore_file=None):
backup_file = os.path.join(sickrage.app.data_dir, '{}_{}.codernitydb.bak'.format(self.name,
datetime.datetime.now().strftime(
'%Y%m%d_%H%M%S')))
if os.path.exists(restore_file):
self.backup(backup_file)
sickrage.app.log.info('Restoring database file {}'.format(restore_file))
with io.open(restore_file, 'rb') as f:
rows = pickle.load(f)
if self.db.exists():
if not self.opened:
self.db.open()
self.db.destroy()
self.db.create()
[self.insert(row) for row in rows]
del rows
# Monkey-Patch storage to suppress logging messages
IU_Storage.get = Custom_IU_Storage_get
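# Hedged sketch (not part of the original module): concrete databases are
# expected to subclass srDatabase and declare their CodernityDB indexes and
# legacy sqlite tables; the class and index names below are hypothetical.
#
#     class CacheDB(srDatabase):
#         _indexes = {'providers': ProvidersIndex}  # hypothetical index class
#         _migrate_list = {'providers': ['provider', 'name', 'url']}
#
#         def __init__(self):
#             super(CacheDB, self).__init__('cache')
#             self.old_db_path = os.path.join(sickrage.app.data_dir, 'cache.db')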
|
gpl-3.0
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/sklearn/neighbors/lof.py
|
3
|
12184
|
# Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from warnings import warn
from scipy.stats import scoreatpercentile
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import UnsupervisedMixin
from ..utils.validation import check_is_fitted
from ..utils import check_array
__all__ = ["LocalOutlierFactor"]
class LocalOutlierFactor(NeighborsBase, KNeighborsMixin, UnsupervisedMixin):
"""Unsupervised Outlier Detection using Local Outlier Factor (LOF)
The anomaly score of each sample is called Local Outlier Factor.
It measures the local deviation of density of a given sample with
respect to its neighbors.
It is local in that the anomaly score depends on how isolated the object
is with respect to the surrounding neighborhood.
More precisely, locality is given by k-nearest neighbors, whose distance
is used to estimate the local density.
By comparing the local density of a sample to the local densities of
its neighbors, one can identify samples that have a substantially lower
density than their neighbors. These are considered outliers.
Parameters
----------
n_neighbors : int, optional (default=20)
Number of neighbors to use by default for :meth:`kneighbors` queries.
If n_neighbors is larger than the number of samples provided,
all samples will be used.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or callable, default 'minkowski'
metric used for the distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If 'precomputed', the training input X is expected to be a distance
matrix.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics:
http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:func:`sklearn.metrics.pairwise.pairwise_distances`. When p = 1, this
is equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
contamination : float in (0., 0.5), optional (default=0.1)
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. When fitting this is used to define the
threshold on the decision function.
n_jobs : int, optional (default=1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Attributes
----------
negative_outlier_factor_ : numpy array, shape (n_samples,)
The opposite LOF of the training samples. The lower, the more abnormal.
Inliers tend to have a LOF score close to 1, while outliers tend
to have a larger LOF score.
The local outlier factor (LOF) of a sample captures its
supposed 'degree of abnormality'.
It is the average of the ratio of the local reachability density of
a sample and those of its k-nearest neighbors.
n_neighbors_ : integer
The actual number of neighbors used for :meth:`kneighbors` queries.
References
----------
.. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).
LOF: identifying density-based local outliers. In ACM sigmod record.
"""
def __init__(self, n_neighbors=20, algorithm='auto', leaf_size=30,
metric='minkowski', p=2, metric_params=None,
contamination=0.1, n_jobs=1):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs)
self.contamination = contamination
def fit_predict(self, X, y=None):
""""Fits the model to the training set X and returns the labels
(1 inlier, -1 outlier) on the training set according to the LOF score
and the contamination parameter.
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and 1 for inliers.
"""
return self.fit(X)._predict()
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
Returns
-------
self : object
Returns self.
"""
if not (0. < self.contamination <= .5):
raise ValueError("contamination must be in (0, 0.5]")
super(LocalOutlierFactor, self).fit(X)
n_samples = self._fit_X.shape[0]
if self.n_neighbors > n_samples:
warn("n_neighbors (%s) is greater than the "
"total number of samples (%s). n_neighbors "
"will be set to (n_samples - 1) for estimation."
% (self.n_neighbors, n_samples))
self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
self._distances_fit_X_, _neighbors_indices_fit_X_ = (
self.kneighbors(None, n_neighbors=self.n_neighbors_))
self._lrd = self._local_reachability_density(
self._distances_fit_X_, _neighbors_indices_fit_X_)
# Compute lof score over training samples to define threshold_:
lrd_ratios_array = (self._lrd[_neighbors_indices_fit_X_] /
self._lrd[:, np.newaxis])
self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
self.threshold_ = -scoreatpercentile(
-self.negative_outlier_factor_, 100. * (1. - self.contamination))
return self
def _predict(self, X=None):
"""Predict the labels (1 inlier, -1 outlier) of X according to LOF.
If X is None, returns the same as fit_predict(X_train).
This method makes it possible to generalize prediction to new observations (not
in the training set). As LOF originally does not deal with new data,
this method is kept private.
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples. If None, makes prediction on the
training data without considering them as their own neighbors.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
check_is_fitted(self, ["threshold_", "negative_outlier_factor_",
"n_neighbors_", "_distances_fit_X_"])
if X is not None:
X = check_array(X, accept_sparse='csr')
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[self._decision_function(X) <= self.threshold_] = -1
else:
is_inlier = np.ones(self._fit_X.shape[0], dtype=int)
is_inlier[self.negative_outlier_factor_ <= self.threshold_] = -1
return is_inlier
def _decision_function(self, X):
"""Opposite of the Local Outlier Factor of X (as bigger is better,
i.e. large values correspond to inliers).
The argument X is supposed to contain *new data*: if X contains a
point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
The decision function on training data is available by considering the
opposite of the negative_outlier_factor_ attribute.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
opposite_lof_scores : array, shape (n_samples,)
The opposite of the Local Outlier Factor of each input samples.
The lower, the more abnormal.
"""
check_is_fitted(self, ["threshold_", "negative_outlier_factor_",
"_distances_fit_X_"])
X = check_array(X, accept_sparse='csr')
distances_X, neighbors_indices_X = (
self.kneighbors(X, n_neighbors=self.n_neighbors_))
X_lrd = self._local_reachability_density(distances_X,
neighbors_indices_X)
lrd_ratios_array = (self._lrd[neighbors_indices_X] /
X_lrd[:, np.newaxis])
# as bigger is better:
return -np.mean(lrd_ratios_array, axis=1)
def _local_reachability_density(self, distances_X, neighbors_indices):
"""The local reachability density (LRD)
The LRD of a sample is the inverse of the average reachability
distance of its k-nearest neighbors.
Parameters
----------
distances_X : array, shape (n_query, self.n_neighbors)
Distances to the neighbors (in the training samples `self._fit_X`)
of each query point to compute the LRD.
neighbors_indices : array, shape (n_query, self.n_neighbors)
Neighbors indices (of each query point) among training samples
self._fit_X.
Returns
-------
local_reachability_density : array, shape (n_samples,)
The local reachability density of each sample.
"""
dist_k = self._distances_fit_X_[neighbors_indices,
self.n_neighbors_ - 1]
reach_dist_array = np.maximum(distances_X, dist_k)
# 1e-10 to avoid `nan' when nb of duplicates > n_neighbors_:
return 1. / (np.mean(reach_dist_array, axis=1) + 1e-10)
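# Hedged usage sketch (not part of the original module). Through the public
# scikit-learn API this estimator is typically used like:
#
#     import numpy as np
#     from sklearn.neighbors import LocalOutlierFactor
#
#     X = np.array([[-1.1], [0.2], [0.3], [101.0]])
#     lof = LocalOutlierFactor(n_neighbors=2, contamination=0.25)
#     labels = lof.fit_predict(X)             # the isolated point is labelled -1
#     scores = lof.negative_outlier_factor_   # the lower, the more abnormal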
|
mit
|
edowson/director
|
src/python/ddapp/transformUtils.py
|
1
|
5794
|
import vtkAll as vtk
#from ddapp import botpy
import math
import numpy as np
#import drc as lcmdrc
def getTransformFromNumpy(mat):
'''
Given a numpy 4x4 array, return a vtkTransform.
'''
m = vtk.vtkMatrix4x4()
for r in xrange(4):
for c in xrange(4):
m.SetElement(r, c, mat[r][c])
t = vtk.vtkTransform()
t.SetMatrix(m)
return t
def getNumpyFromTransform(transform):
'''
Given a vtkTransform, return a numpy 4x4 array
'''
mat = transform.GetMatrix()
a = np.zeros((4,4))
for r in xrange(4):
for c in xrange(4):
a[r][c] = mat.GetElement(r, c)
return a
def getTransformFromAxes(xaxis, yaxis, zaxis):
t = vtk.vtkTransform()
m = vtk.vtkMatrix4x4()
axes = np.array([xaxis, yaxis, zaxis]).transpose().copy()
vtk.vtkMath.Orthogonalize3x3(axes, axes)
for r in xrange(3):
for c in xrange(3):
m.SetElement(r, c, axes[r][c])
t.SetMatrix(m)
return t
def getTransformFromAxesAndOrigin(xaxis, yaxis, zaxis, origin):
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
return t
def getAxesFromTransform(t):
xaxis = np.array(t.TransformNormal(1,0,0))
yaxis = np.array(t.TransformNormal(0,1,0))
zaxis = np.array(t.TransformNormal(0,0,1))
return xaxis, yaxis, zaxis
def concatenateTransforms(transformList):
'''
Given a list of vtkTransform objects, returns a new vtkTransform
which is a concatenation of the whole list using vtk post multiply.
See documentation for vtkTransform::PostMultiply.
'''
result = vtk.vtkTransform()
result.PostMultiply()
for t in transformList:
result.Concatenate(t)
return result
def findTransformAxis(transform, referenceVector):
'''
Given a vtkTransform and a reference vector, find a +/- axis of the transform
that most closely matches the reference vector. Returns the matching axis
index, axis, and sign.
'''
refAxis = referenceVector / np.linalg.norm(referenceVector)
axes = getAxesFromTransform(transform)
axisProjections = np.array([np.abs(np.dot(axis, refAxis)) for axis in axes])
matchIndex = axisProjections.argmax()
matchAxis = axes[matchIndex]
matchSign = np.sign(np.dot(matchAxis, refAxis))
return matchIndex, matchAxis, matchSign
def getTransformFromOriginAndNormal(origin, normal, normalAxis=2):
normal = np.array(normal)
normal /= np.linalg.norm(normal)
axes = [[0,0,0],
[0,0,0],
[0,0,0]]
axes[normalAxis] = normal
vtk.vtkMath.Perpendiculars(axes[normalAxis], axes[(normalAxis+1) % 3], axes[(normalAxis+2) % 3], 0)
t = getTransformFromAxes(*axes)
t.PostMultiply()
t.Translate(origin)
return t
def orientationFromNormal(normal):
'''
Creates a frame where the Z axis points in the direction of the given normal.
'''
zaxis = normal
xaxis = [0,0,0]
yaxis = [0,0,0]
vtk.vtkMath.Perpendiculars(zaxis, xaxis, yaxis, 0)
return orientationFromAxes(xaxis, yaxis, zaxis)
def orientationFromAxes(xaxis, yaxis, zaxis):
t = getTransformFromAxes(xaxis, yaxis, zaxis)
return rollPitchYawFromTransform(t)
def rollPitchYawFromTransform(t):
pos, quat = poseFromTransform(t)
return quaternionToRollPitchYaw(quat)
def frameInterpolate(trans_a, trans_b, weight_b):
'''
Interpolate two frames where weight_b=[0,1]
'''
[pos_a, quat_a] = poseFromTransform(trans_a)
[pos_b, quat_b] = poseFromTransform(trans_b)
pos_c = pos_a *(1-weight_b) + pos_b * weight_b;
quat_c = botpy.quat_interpolate(quat_a,quat_b, weight_b)
return transformFromPose(pos_c, quat_c)
def transformFromPose(position, quaternion):
'''
Returns a vtkTransform
'''
rotationMatrix = np.zeros((3,3))
vtk.vtkMath.QuaternionToMatrix3x3(quaternion, rotationMatrix)
mat = np.eye(4)
mat[:3,:3] = rotationMatrix
mat[:3,3] = position
t = vtk.vtkTransform()
t.SetMatrix(mat.flatten())
return t
def poseFromTransform(transform):
'''
Returns position, quaternion
'''
angleAxis = range(4)
transform.GetOrientationWXYZ(angleAxis)
angleAxis[0] = math.radians(angleAxis[0])
pos = transform.GetPosition()
quat = botpy.angle_axis_to_quat(angleAxis[0], angleAxis[1:])
return np.array(pos), np.array(quat)
def frameFromPositionAndRPY(position, rpy):
'''
rpy specified in degrees
'''
rpy = [math.radians(deg) for deg in rpy]
angle, axis = botpy.roll_pitch_yaw_to_angle_axis(rpy)
t = vtk.vtkTransform()
t.PostMultiply()
t.RotateWXYZ(math.degrees(angle), axis)
t.Translate(position)
return t
def rollPitchYawToQuaternion(rpy):
return botpy.roll_pitch_yaw_to_quat(rpy)
def quaternionToRollPitchYaw(quat):
return botpy.quat_to_roll_pitch_yaw(quat)
def frameFromPositionMessage(positionMessage):
'''
Given an lcmdrc.position_t message, returns a vtkTransform
'''
trans = positionMessage.translation
quat = positionMessage.rotation
trans = [trans.x, trans.y, trans.z]
quat = [quat.w, quat.x, quat.y, quat.z]
return transformFromPose(trans, quat)
def positionMessageFromFrame(transform):
'''
Given a vtkTransform, returns an lcmdrc.position_t message
'''
pos, wxyz = poseFromTransform(transform)
trans = lcmdrc.vector_3d_t()
trans.x, trans.y, trans.z = pos
quat = lcmdrc.quaternion_t()
quat.w, quat.x, quat.y, quat.z = wxyz
pose = lcmdrc.position_3d_t()
pose.translation = trans
pose.rotation = quat
return pose
def copyFrame(transform):
t = vtk.vtkTransform()
t.PostMultiply()
t.SetMatrix(transform.GetMatrix())
return t
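if __name__ == "__main__":
    # Hedged round-trip demo, not part of the original module; it assumes the
    # ddapp vtkAll bindings are importable.
    m = np.eye(4)
    m[:3, 3] = [1.0, 2.0, 3.0]  # a pure translation
    t = getTransformFromNumpy(m)
    assert np.allclose(getNumpyFromTransform(t), m)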
|
bsd-3-clause
|
fnordjes/mimic_hue
|
hue.py
|
1
|
4037
|
def todict(obj, classkey=None):
'''
Helper function to recursively transform custom classes to python
dictionaries. Taken from here:
http://stackoverflow.com/questions/1036409/recursively-convert-python-object-graph-to-dictionary
'''
if isinstance(obj, dict):
for k in obj.keys():
obj[k] = todict(obj[k], classkey)
return obj
elif hasattr(obj, "__iter__"):
return [todict(v, classkey) for v in obj]
elif hasattr(obj, "__dict__"):
data = dict([(key, todict(value, classkey))
for key, value in obj.__dict__.iteritems()
if not callable(value) and not key.startswith('_')])
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
else:
return obj
#web.header('Content-Type', 'application/json')
class State:
'''
The class State holds the properties of a lamp's state.
See the developers' api description from philips for further information.
'''
def __init__(self):
self.on = False
self.bri = 0
self.hue = 0
self.sat = 0
self.xy = [0.0, 0.0]
self.ct = 0
self.alert = "none"
self.effect = "none"
self.colormode = "hs"
self.reachable = True
class Light:
'''
The class Light holds the properties of a lamp including its State.
'''
def __init__(self):
self.type = "DIY color light"
self.name = "My diy light"
self.modelid = "DIY-1337"
self.swversion = "1337"
self.pointsymbol = {
"1": "none",
"2": "none",
"3": "none",
"4": "none",
"5": "none",
"6": "none",
"7": "none",
"8": "none"
}
self.state = State()
class Group:
'''
Lights can be combined to Groups.
"action" holds the last command that was sent to the group.
'''
def __init__(self):
self.action = {
"on" : True,
"bri": 254,
"hue": 33536,
"sat": 144,
"xy" : [0.0, 0.0],
"ct" : 153,
"effect": "none",
"colormode": "xy"
}
self.lights = {}
self.name = "My Group"
class Config:
'''
The class Config holds the properties of the bridge itself.
'''
def __init__(self):
self.name = "Smartbridge"
self.mac = "b1:6b:00:b5:ba:be"
self.dhcp = True
self.ipaddress = "192.168.1.24:1234"
self.netmask = "255.255.255.0"
self.gateway = "192.168.1.1"
self.proxyaddress = "none"
self.proxyport = 0
self.utc = "1970-01-01T00:00:01"
self.whitelist = {}
self.swversion = "1337"
self.swupdate = {
"updatestate": 0,
"url": "",
"text": "",
"notify": False
}
self.linkbutton = True
self.portalservices = False
class Schedule:
'''
The Schedule might be some timed action that the bridge sends to the lamps.
'''
def __init__(self):
self.name = "My schedule",
self.description = "",
self.command = {
"address": "/api/0/groups/0/action",
"body": {
"on": True
},
"method": "PUT"
}
self.time = "1970-01-01T00:00:00"
class FullState:
'''
The combination of all the above containers.
'''
def __init__(self):
self.lights = {}
self.groups = {}
self.config = Config()
self.schedules = {}
'''
Create some default values
'''
users = {}
users["hue_hacker"] = {}
users["hue_hacker"]['devicetype'] = "generic device"
users["hue_hacker"]['create date'] = "2013-04-01T11:11:11"
users["hue_hacker"]['last use date'] = "2013-04-01T11:11:11"
light_1 = Light()
light_1.name = "Bedroom light"
full_state = FullState()
full_state.lights["1"] = light_1
|
gpl-2.0
|
Bachmann1234/pytest
|
doc/en/example/nonpython/conftest.py
|
202
|
1337
|
# content of conftest.py
import pytest
def pytest_collect_file(parent, path):
if path.ext == ".yml" and path.basename.startswith("test"):
return YamlFile(path, parent)
class YamlFile(pytest.File):
def collect(self):
import yaml # we need a yaml parser, e.g. PyYAML
raw = yaml.safe_load(self.fspath.open())
for name, spec in raw.items():
yield YamlItem(name, self, spec)
class YamlItem(pytest.Item):
def __init__(self, name, parent, spec):
super(YamlItem, self).__init__(name, parent)
self.spec = spec
def runtest(self):
for name, value in self.spec.items():
# some custom test execution (dumb example follows)
if name != value:
raise YamlException(self, name, value)
def repr_failure(self, excinfo):
""" called when self.runtest() raises an exception. """
if isinstance(excinfo.value, YamlException):
return "\n".join([
"usecase execution failed",
" spec failed: %r: %r" % excinfo.value.args[1:3],
" no further details known at this point."
])
def reportinfo(self):
return self.fspath, 0, "usecase: %s" % self.name
class YamlException(Exception):
""" custom exception for error reporting. """
|
mit
|
si3792/icough
|
server/project/icough/migrations/0001_initial.py
|
1
|
1210
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-25 15:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Appointment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField()),
('created', models.DateTimeField(auto_now_add=True)),
('state', models.CharField(choices=[('A', 'Approved'), ('D', 'Declined'), ('P', 'Pending')], max_length=1)),
('doctor', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='appointment_doctor', to=settings.AUTH_USER_MODEL)),
('patient', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='appointment_patient', to=settings.AUTH_USER_MODEL)),
],
),
]
|
gpl-3.0
|
firzhugh/photography-feeds
|
node_modules/grunt-jasmine-runner/node_modules/grunt/node_modules/gzip-js/test/runner.py
|
182
|
1600
|
#!/usr/bin/env python
import os
import sys
import shutil
from colorama import Fore
import argparse
import zipTest
import unzipTest
parser = argparse.ArgumentParser(description='Process command-line arguments')
parser.add_argument('--file', '-f', metavar='path/to/file', type=str, nargs='?', help='Path to file to use for test')
parser.add_argument('--level', '-l', metavar='#', type=int, nargs='?', help='Compression level')
parser.add_argument('--no-delete', const=True, default=False, nargs='?', help='Don\'t delete files produced for test')
parser.add_argument('--test', default='both', nargs='?', help='Which test to run (zip, unzip, both)')
args = parser.parse_args()
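# Hedged example invocation (not part of the original script); the file path is
# hypothetical:
#
#     python runner.py --test zip --level 6 --file path/to/sample.txt --no-delete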
allPassed = True
outDir = 'test-outs'
# make the test-outs directory
try:
os.mkdir(outDir)
except:
pass
delete = not getattr(args, 'no_delete')
level = getattr(args, 'level')
inFile = getattr(args, 'file')
test = getattr(args, 'test')
if test == 'zip' or test == 'both':
print Fore.CYAN + 'Running zip tests' + Fore.RESET
# if the user specifies a file, only run that test
if inFile != None:
allPassed = zipTest.runTest(inFile, level)
else:
allPassed = zipTest.runAll(level)
if test == 'unzip' or test == 'both':
print Fore.CYAN + 'Running unzip tests' + Fore.RESET
# if the user specifies a file, only run that test
if inFile != None:
allPassed = unzipTest.runTest(inFile, level)
else:
allPassed = unzipTest.runAll(level)
if delete:
shutil.rmtree(outDir)
if allPassed:
print Fore.GREEN + 'All tests passed!' + Fore.RESET
else:
print Fore.RED + 'Automated test failed' + Fore.RESET
sys.exit(1)
|
mit
|
birdland/dlkit-doc
|
dlkit/proxy/rules.py
|
1
|
10099
|
from ..osid import rules as osid_rules
class Proxy(osid_rules.OsidResult):
"""A ``Proxy`` is used to transfer external information from an application server into an OSID Provider."""
def has_authentication(self):
"""Tests if an authentication is available.
:return: ``true`` if an ``Authentication`` is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_authentication(self):
"""Gets the ``Authentication`` for this proxy.
:return: the authentication
:rtype: ``osid.authentication.process.Authentication``
:raise: ``IllegalState`` -- ``has_authentication()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authentication.process.Authentication
authentication = property(fget=get_authentication)
def has_effective_agent(self):
"""Tests if an effective agent is available.
:return: ``true`` if an effective agent is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_effective_agent_id(self):
"""Gets the effective ``Agent Id`` for this proxy.
:return: the effective agent ``Id``
:rtype: ``osid.id.Id``
:raise: ``IllegalState`` -- ``has_effective_agent()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
effective_agent_id = property(fget=get_effective_agent_id)
def get_effective_agent(self):
"""Gets the effective ``Agent`` for this proxy.
:return: the effective agent
:rtype: ``osid.authentication.Agent``
:raise: ``IllegalState`` -- ``has_effective_agent()`` is ``false``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authentication.Agent
effective_agent = property(fget=get_effective_agent)
def has_effective_date(self):
"""Tests if an effective date is available.
:return: ``true`` if an effective date is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_effective_date(self):
"""Gets the effective date.
:return: the effective date
:rtype: ``timestamp``
:raise: ``IllegalState`` -- ``has_effective_date()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # timestamp
effective_date = property(fget=get_effective_date)
def get_effective_clock_rate(self):
"""Gets the rate of the clock.
:return: the rate
:rtype: ``decimal``
:raise: ``IllegalState`` -- ``has_effective_date()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # decimal
effective_clock_rate = property(fget=get_effective_clock_rate)
def get_locale(self):
"""Gets the locale.
:return: a locale
:rtype: ``osid.locale.Locale``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.Locale
locale = property(fget=get_locale)
def has_format_type(self):
"""Tests if a ``DisplayText`` format ``Type`` is available.
:return: ``true`` if a format type is available, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_format_type(self):
"""Gets the ``DisplayText`` format ``Type``.
:return: the format ``Type``
:rtype: ``osid.type.Type``
:raise: ``IllegalState`` -- ``has_format_type()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
format_type = property(fget=get_format_type)
def get_proxy_record(self, proxy_record_type):
"""Gets the proxy record corresponding to the given ``Proxy`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``proxy_record_type`` may be the ``Type``
returned in ``get_record_types()`` or any of its parents in a
``Type`` hierarchy where ``has_record_type(proxy_record_type)``
is ``true`` .
:param proxy_record_type: the type of proxy record to retrieve
:type proxy_record_type: ``osid.type.Type``
:return: the proxy record
:rtype: ``osid.proxy.records.ProxyRecord``
:raise: ``NullArgument`` -- ``proxy_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(proxy_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.proxy.records.ProxyRecord
class ProxyCondition(osid_rules.OsidCondition):
"""A ``ProxyCondition`` is used to transfer external information into a proxy."""
def set_effective_agent_id(self, agent_id):
"""Sets the effective agent ``Id`` to indicate acting on behalf of.
:param agent_id: an agent ``Id``
:type agent_id: ``osid.id.Id``
:raise: ``NullArgument`` -- ``agent_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
effective_agent_id = property(fset=set_effective_agent_id)
def set_effective_date(self, date, rate):
"""Sets the effective date.
:param date: a date
:type date: ``timestamp``
:param rate: the rate at which the clock should tick from the given effective date. 0 is a clock that is fixed
:type rate: ``decimal``
:raise: ``NullArgument`` -- ``date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
def set_language_type(self, language_type):
"""Sets the language type.
:param language_type: the language type
:type language_type: ``osid.type.Type``
:raise: ``NullArgument`` -- ``language_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
language_type = property(fset=set_language_type)
def set_script_type(self, script_type):
"""Sets the script type.
:param script_type: the script type
:type script_type: ``osid.type.Type``
:raise: ``NullArgument`` -- ``script_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
script_type = property(fset=set_script_type)
def set_calendar_type(self, calendar_type):
"""Sets the calendar type.
:param calendar_type: the calendar type
:type calendar_type: ``osid.type.Type``
:raise: ``NullArgument`` -- ``calendar_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
calendar_type = property(fset=set_calendar_type)
def set_time_type(self, time_type):
"""Sets the time type.
:param time_type: the time type
:type time_type: ``osid.type.Type``
:raise: ``NullArgument`` -- ``time_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
time_type = property(fset=set_time_type)
def set_currency_type(self, currency_type):
"""Sets the currency type.
:param currency_type: the currency type
:type currency_type: ``osid.type.Type``
:raise: ``NullArgument`` -- ``currency_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
currency_type = property(fset=set_currency_type)
def set_unit_system_type(self, unit_system_type):
"""Sets the unit system type.
:param unit_system_type: the unit system type
:type unit_system_type: ``osid.type.Type``
:raise: ``NullArgument`` -- ``unit_system_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
unit_system_type = property(fset=set_unit_system_type)
def set_format_type(self, format_type):
"""Sets the ``DisplayText`` format type.
:param format_type: the format type
:type format_type: ``osid.type.Type``
:raise: ``NullArgument`` -- ``format_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
format_type = property(fset=set_format_type)
def get_proxy_condition_record(self, proxy_condition_type):
"""Gets the proxy condition record corresponding to the given ``Proxy`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``proxy_condition_type`` may be the ``Type``
returned in ``get_record_types()`` or any of its parents in a
``Type`` hierarchy where ``has_record_type(proxy_condition_type)``
is ``true`` .
:param proxy_condition_type: the type of proxy condition record to retrieve
:type proxy_condition_type: ``osid.type.Type``
:return: the proxy condition record
:rtype: ``osid.proxy.records.ProxyConditionRecord``
:raise: ``NullArgument`` -- ``proxy_condition_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(proxy_condition_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.proxy.records.ProxyConditionRecord
|
mit
|
cing/ChannelAnalysis
|
ChannelAnalysis/PoreAnalysis/Histograms.py
|
1
|
6160
|
#!/usr/bin/python
###############################################################################
#
# Prepares histograms for individual rings of channel atoms based on a user-
# -defined column of the channel atom datafiles.
#
# Example: For 14-column data with this type (described elsewhere):
#
# 1 7.0 0.413 0.373 0.294 0.300 0.282 0.425 0.358 0.246 0.422 0.305 0.392 0.350
# 2 7.0 0.412 0.337 0.280 0.388 0.292 0.419 0.384 0.233 0.469 0.287 0.389 0.301
#
# The following command will load that datafile into memory, strip
# the first 2000 lines and produce a series of histogram datafiles
# or return data that could be plotted accordingly using matplotlib.
#
# python ChannelAtom_Histograms.py -f nav.n7.thr -r 4 -remove 2000
#
# By Chris Ing, 2013 for Python 2.7
#
###############################################################################
from argparse import ArgumentParser
from collections import defaultdict
from numpy import histogram, convolve, ones, mean
from ChannelAnalysis.PoreAnalysis.Preprocessor import *
# a great helper function to iterate over chunks of a list
def chunker(seq, size):
return (seq[pos:pos + size] for pos in xrange(0, len(seq), size))
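# Hedged illustration (not part of the original script):
#     list(chunker([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]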
# a helper that builds a uniform moving-average kernel of the given size.
def window(size):
return ones(size)/float(size)
# This returns the sort_column as a time series, useful
# for making scatterplot time series of channel atom positions.
def compute_atom_timeseries(data_lines, sort_col, traj_col,
col_skip=2, num_cols=3, window_size=100,
mean_shift=False):
# These are dictionaries of dict where the key is the traj_number
# and the subdict is keyed by atom number and the value is a LIST of atom positions,
# or associated time values in the case of the associated time_per_traj
atom_pos_per_traj = defaultdict(dict)
time_per_traj = defaultdict(dict)
# First determine the mean displacement for the entire dataset.
if mean_shift:
traj_mean = 0.0
for line in data_lines:
col_blocks = list(chunker(line[col_skip:],num_cols))
traj_mean += mean([block[sort_col] for block in col_blocks])
traj_mean /= len(data_lines)
for line in data_lines:
traj_id = line[traj_col]
for atom_num, atom in enumerate(list(chunker(line[col_skip:],
num_cols))):
sort_val = atom[sort_col]
if mean_shift:
sort_val -= traj_mean
if atom_num not in atom_pos_per_traj[traj_id]:
atom_pos_per_traj[traj_id][atom_num] = [sort_val]
time_per_traj[traj_id][atom_num] = [line[0]]
else:
atom_pos_per_traj[traj_id][atom_num].append(sort_val)
time_per_traj[traj_id][atom_num].append(line[0])
if window_size != None:
for t_id, atoms in atom_pos_per_traj.iteritems():
for a_id, atom_ts in atoms.iteritems():
atom_pos_per_traj[t_id][a_id] = list(convolve(atom_ts,
window(window_size),
'same'))
return (dict(atom_pos_per_traj), dict(time_per_traj))
# Not a complicated function for getting histogrammed data for the sort_col
# in a particular group of data_lines. This does not distinguish between
# any of the residues in the ring, i.e. if one is protonated this will
# be lumped in all together.
def compute_allatom_histogram(data_lines, sort_col,
num_cols=3,
histmin=-1.50, histmax=1.5,
histbins=300, col_skip=2,
normed=True, prefix=None):
# Collect the sort-column value of every ring atom across all data lines.
atom_sortvals = []
# These are dictionaries of lists where the key is a coord_col
# and the list is a axial probability or associated z value.
coord_hist_per_atom = defaultdict(list)
z_per_atom = defaultdict(list)
for line in data_lines:
for atom in chunker(line[col_skip:],num_cols):
sort_val = atom[sort_col]
atom_sortvals.append(sort_val)
histo, edges = histogram(atom_sortvals, range=[histmin, histmax],
bins=histbins, normed=normed)
if prefix != None:
with open(prefix+"_allatom","w") as out:
for xval, yval in zip(edges,histo):
out.write(str(xval)+" "+str(yval)+"\n")
coord_hist_per_atom["ALL"].extend(list(histo))
z_per_atom["ALL"].extend(list(edges))
return (dict(coord_hist_per_atom), dict(z_per_atom))
if __name__ == '__main__':
parser = ArgumentParser(
description='This script parses input columnular ASCII data\
of channel atoms and makes it nice and pretty for subsequent analysis.')
parser.add_argument(
'-f', dest='filenames', type=str, nargs="+", required=True,
help='a filename of atom data from MDAnalysis trajectory data')
parser.add_argument(
'-c', dest='num_cols', type=int, default=3,
help='the number of columns per channel atom in the input')
parser.add_argument(
'-cs', dest='col_skip', type=int, default=2,
help='the number of columns per line in input that are not atom data')
parser.add_argument(
'-s', dest='sort_col', type=int, default=2,
help='a zero inclusive column number to pull from each res, typically z')
parser.add_argument(
'-remove', dest='remove_frames', type=int, default=0,
help='this is a number of frames to remove from the start of the data')
args = parser.parse_args()
data_f_processed = process_channelatoms(filenames=args.filenames,
remove_frames=args.remove_frames)
allatom_histo = compute_allatom_histogram(data_f_processed,
args.sort_col,
col_skip=args.col_skip,
num_cols=args.num_cols)
print allatom_histo
|
mit
|
rjschwei/azure-sdk-for-python
|
azure-mgmt-commerce/azure/mgmt/commerce/models/__init__.py
|
1
|
1427
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .info_field import InfoField
from .usage_aggregation import UsageAggregation
from .rate_card_query_parameters import RateCardQueryParameters
from .resource_rate_card_info import ResourceRateCardInfo
from .offer_term_info import OfferTermInfo
from .meter_info import MeterInfo
from .monetary_credit import MonetaryCredit
from .monetary_commitment import MonetaryCommitment
from .recurring_charge import RecurringCharge
from .error_response import ErrorResponse, ErrorResponseException
from .usage_aggregation_paged import UsageAggregationPaged
from .usage_management_client_enums import (
AggregationGranularity,
)
__all__ = [
'InfoField',
'UsageAggregation',
'RateCardQueryParameters',
'ResourceRateCardInfo',
'OfferTermInfo',
'MeterInfo',
'MonetaryCredit',
'MonetaryCommitment',
'RecurringCharge',
'ErrorResponse', 'ErrorResponseException',
'UsageAggregationPaged',
'AggregationGranularity',
]
|
mit
|
dongjoon-hyun/tensorflow
|
tensorflow/python/autograph/pyct/ast_util.py
|
9
|
9284
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST manipulation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
class CleanCopier(object):
"""NodeTransformer-like visitor that copies an AST."""
def __init__(self, preserve_annos):
super(CleanCopier, self).__init__()
self.preserve_annos = preserve_annos
def copy(self, node):
"""Returns a deep copy of node (excluding some fields, see copy_clean)."""
if isinstance(node, list):
return [self.copy(n) for n in node]
elif isinstance(node, tuple):
return tuple(self.copy(n) for n in node)
elif not isinstance(node, (gast.AST, ast.AST)):
# Assuming everything that's not an AST, list or tuple is a value type
# and may simply be assigned.
return node
assert isinstance(node, (gast.AST, ast.AST))
new_fields = {}
for f in node._fields:
if not f.startswith('__') and hasattr(node, f):
new_fields[f] = self.copy(getattr(node, f))
new_node = type(node)(**new_fields)
if self.preserve_annos:
for k in self.preserve_annos:
anno.copyanno(node, new_node, k)
return new_node
def copy_clean(node, preserve_annos=None):
"""Creates a deep copy of an AST.
The copy will not include fields that are prefixed by '__', with the
exception of user-specified annotations.
Args:
node: ast.AST
preserve_annos: Optional[Set[Hashable]], annotation keys to include in the
copy
Returns:
ast.AST
"""
return CleanCopier(preserve_annos).copy(node)
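# A minimal usage sketch (assumed, not part of the original module): copy an
# expression parsed with the parser helper imported above.
#
#   expr = parser.parse_expression('a + b')
#   expr_copy = copy_clean(expr)   # deep copy, dropping '__'-prefixed fields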
class SymbolRenamer(gast.NodeTransformer):
"""Transformer that can rename symbols to a simple names."""
def __init__(self, name_map):
self.name_map = name_map
def _process(self, node):
qn = anno.getanno(node, anno.Basic.QN)
if qn in self.name_map:
new_node = gast.Name(str(self.name_map[qn]), node.ctx, None)
# All annotations get carried over.
for k in anno.keys(node):
anno.copyanno(node, new_node, k)
return new_node
return self.generic_visit(node)
def visit_Name(self, node):
return self._process(node)
def visit_Attribute(self, node):
if anno.hasanno(node, anno.Basic.QN):
return self._process(node)
# Attributes of dynamic objects will not have a QN.
return self.generic_visit(node)
def rename_symbols(node, name_map):
"""Renames symbols in an AST. Requires qual_names annotations."""
renamer = SymbolRenamer(name_map)
if isinstance(node, list):
return [renamer.visit(n) for n in node]
elif isinstance(node, tuple):
return tuple(renamer.visit(n) for n in node)
return renamer.visit(node)
def keywords_to_dict(keywords):
"""Converts a list of ast.keyword objects to a dict."""
keys = []
values = []
for kw in keywords:
keys.append(gast.Str(kw.arg))
values.append(kw.value)
return gast.Dict(keys=keys, values=values)
class PatternMatcher(gast.NodeVisitor):
"""Matches a node against a pattern represented by a node."""
def __init__(self, pattern):
self.pattern = pattern
self.pattern_stack = []
self.matches = True
def compare_and_visit(self, node, pattern):
self.pattern_stack.append(self.pattern)
self.pattern = pattern
self.generic_visit(node)
self.pattern = self.pattern_stack.pop()
def no_match(self):
self.matches = False
return False
def is_wildcard(self, p):
if isinstance(p, (list, tuple)) and len(p) == 1:
p, = p
if isinstance(p, gast.Name) and p.id == '_':
return True
if p == '_':
return True
return False
def generic_visit(self, node):
if not self.matches:
return
pattern = self.pattern
for f in node._fields:
if f.startswith('__'):
continue
if not hasattr(node, f):
if hasattr(pattern, f) and getattr(pattern, f):
return self.no_match()
else:
continue
if not hasattr(pattern, f):
return self.no_match()
v = getattr(node, f)
p = getattr(pattern, f)
if self.is_wildcard(p):
continue
if isinstance(v, (list, tuple)):
if not isinstance(p, (list, tuple)) or len(v) != len(p):
return self.no_match()
for v_item, p_item in zip(v, p):
self.compare_and_visit(v_item, p_item)
elif isinstance(v, (gast.AST, ast.AST)):
if not isinstance(v, type(p)) and not isinstance(p, type(v)):
return self.no_match()
self.compare_and_visit(v, p)
else:
# Assume everything else is a value type.
if v != p:
return self.no_match()
def matches(node, pattern):
"""Basic pattern matcher for AST.
  The pattern may contain wildcards represented by the symbol '_'. A node
  matches a pattern if, for every node in the tree, the pattern has either a
  node of the same type at the corresponding position or a Name node with
  id='_'.
Args:
node: ast.AST
pattern: ast.AST
Returns:
bool
"""
if isinstance(pattern, str):
pattern = parser.parse_expression(pattern)
matcher = PatternMatcher(pattern)
matcher.visit(node)
return matcher.matches
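# A minimal usage sketch (assumed): '_' acts as a wildcard, as described above.
#
#   call = parser.parse_expression('foo(1)')
#   matches(call, 'foo(_)')   # True - the argument is wildcarded
#   matches(call, 'bar(_)')   # False - the function name differs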
# TODO(mdan): Once we have error tracing, we may be able to just go to SSA.
def apply_to_single_assignments(targets, values, apply_fn):
"""Applies a function to each individual assignment.
This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.
  It tries to break down the unpacking if possible; the result is the same as
  passing the assigned values to apply_fn in SSA form.
Examples:
The following will result in apply_fn(a, c), apply_fn(b, d):
a, b = c, d
The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):
a, b = c
The following will result in apply_fn(a, (b, c)):
a = b, c
It uses the visitor pattern to allow subclasses to process single
assignments individually.
Args:
    targets: Union[List[ast.AST], Tuple[ast.AST, ...], ast.AST]; should be
      used with the targets field of an ast.Assign node
values: ast.AST
apply_fn: Callable[[ast.AST, ast.AST], None], called with the
respective nodes of each single assignment
"""
if not isinstance(targets, (list, tuple)):
targets = (targets,)
for target in targets:
if isinstance(target, (gast.Tuple, gast.List)):
for i in range(len(target.elts)):
target_el = target.elts[i]
if isinstance(values, (gast.Tuple, gast.List)):
value_el = values.elts[i]
else:
idx = parser.parse_expression(str(i))
value_el = gast.Subscript(values, gast.Index(idx), ctx=gast.Load())
apply_to_single_assignments(target_el, value_el, apply_fn)
else:
apply_fn(target, values)
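# A minimal usage sketch (assumed): given an ast.Assign node `assign` built
# from "a, b = c, d", the callback receives each single assignment in turn.
#
#   pairs = []
#   apply_to_single_assignments(assign.targets, assign.value,
#                               lambda t, v: pairs.append((t, v)))
#   # pairs now holds the AST node pairs (a, c) and (b, d).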
def parallel_walk(node, other):
"""Walks two ASTs in parallel.
The two trees must have identical structure.
Args:
node: Union[ast.AST, Iterable[ast.AST]]
other: Union[ast.AST, Iterable[ast.AST]]
Yields:
Tuple[ast.AST, ast.AST]
Raises:
ValueError: if the two trees don't have identical structure.
"""
if isinstance(node, (list, tuple)):
node_stack = list(node)
else:
node_stack = [node]
if isinstance(other, (list, tuple)):
other_stack = list(other)
else:
other_stack = [other]
while node_stack and other_stack:
assert len(node_stack) == len(other_stack)
n = node_stack.pop()
o = other_stack.pop()
if (not isinstance(n, (ast.AST, gast.AST)) or
not isinstance(o, (ast.AST, gast.AST)) or
n.__class__.__name__ != o.__class__.__name__):
raise ValueError('inconsistent nodes: {} and {}'.format(n, o))
yield n, o
for f in n._fields:
n_child = getattr(n, f, None)
o_child = getattr(o, f, None)
if f.startswith('__') or n_child is None or o_child is None:
continue
if isinstance(n_child, (list, tuple)):
if (not isinstance(o_child, (list, tuple)) or
len(n_child) != len(o_child)):
raise ValueError(
'inconsistent values for field {}: {} and {}'.format(
f, n_child, o_child))
node_stack.extend(n_child)
other_stack.extend(o_child)
elif isinstance(n_child, (gast.AST, ast.AST)):
node_stack.append(n_child)
other_stack.append(o_child)
elif n_child != o_child:
raise ValueError(
'inconsistent values for field {}: {} and {}'.format(
f, n_child, o_child))
|
apache-2.0
|
crazyAxe/tp-libvirt
|
provider/libvirt_version.py
|
7
|
1753
|
"""
Shared code for tests that need to get the libvirt version
"""
import re
import logging
from virttest import virsh
LIBVIRT_LIB_VERSION = 0
def version_compare(major, minor, update):
"""
Determine/use the current libvirt library version on the system
and compare input major, minor, and update values against it.
    If the running version is greater than or equal to the input
    version, return True; otherwise, return False.
This is designed to handle upstream version comparisons for
test adjustments and/or comparisons as a result of upstream
fixes or changes that could impact test results.
:param major: Major version to compare against
:param minor: Minor version to compare against
:param update: Update value to compare against
:return: True if running version is greater than or
equal to the input libvirt version
"""
global LIBVIRT_LIB_VERSION
if LIBVIRT_LIB_VERSION == 0:
try:
regex = r'[Uu]sing\s*[Ll]ibrary:\s*[Ll]ibvirt\s*'
regex += r'(\d+)\.(\d+)\.(\d+)'
lines = virsh.version().stdout.splitlines()
for line in lines:
mobj = re.search(regex, line)
if bool(mobj):
LIBVIRT_LIB_VERSION = int(mobj.group(1)) * 1000000 + \
int(mobj.group(2)) * 1000 + \
int(mobj.group(3))
break
except (ValueError, TypeError, AttributeError):
logging.warning("Error determining libvirt version")
return False
compare_version = major * 1000000 + minor * 1000 + update
if LIBVIRT_LIB_VERSION >= compare_version:
return True
return False
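# A minimal usage sketch (assumed): gate a test step on the running libvirt
# library version.
#
#   if not version_compare(1, 2, 17):
#       logging.info("libvirt < 1.2.17, skipping this check")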
|
gpl-2.0
|
BackupGGCode/python-for-android
|
python3-alpha/python3-src/Lib/test/test_sunau.py
|
66
|
2139
|
from test.support import run_unittest, TESTFN
import unittest
import os
import sunau
nchannels = 2
sampwidth = 2
framerate = 8000
nframes = 100
class SunAUTest(unittest.TestCase):
def setUp(self):
self.f = None
def tearDown(self):
if self.f is not None:
self.f.close()
try:
os.remove(TESTFN)
except OSError:
pass
def test_lin(self):
self.f = sunau.open(TESTFN, 'w')
self.f.setnchannels(nchannels)
self.f.setsampwidth(sampwidth)
self.f.setframerate(framerate)
self.f.setcomptype('NONE', 'not compressed')
output = b'\xff\x00\x12\xcc' * (nframes * nchannels * sampwidth // 4)
self.f.writeframes(output)
self.f.close()
self.f = sunau.open(TESTFN, 'rb')
self.assertEqual(nchannels, self.f.getnchannels())
self.assertEqual(sampwidth, self.f.getsampwidth())
self.assertEqual(framerate, self.f.getframerate())
self.assertEqual(nframes, self.f.getnframes())
self.assertEqual('NONE', self.f.getcomptype())
self.assertEqual(self.f.readframes(nframes), output)
self.f.close()
def test_ulaw(self):
self.f = sunau.open(TESTFN, 'w')
self.f.setnchannels(nchannels)
self.f.setsampwidth(sampwidth)
self.f.setframerate(framerate)
self.f.setcomptype('ULAW', '')
# u-law compression is lossy, therefore we can't expect non-zero data
# to come back unchanged.
output = b'\0' * nframes * nchannels * sampwidth
self.f.writeframes(output)
self.f.close()
self.f = sunau.open(TESTFN, 'rb')
self.assertEqual(nchannels, self.f.getnchannels())
self.assertEqual(sampwidth, self.f.getsampwidth())
self.assertEqual(framerate, self.f.getframerate())
self.assertEqual(nframes, self.f.getnframes())
self.assertEqual('ULAW', self.f.getcomptype())
self.assertEqual(self.f.readframes(nframes), output)
self.f.close()
def test_main():
run_unittest(SunAUTest)
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
vponomaryov/manila
|
manila/exception.py
|
1
|
25195
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Manila base exception handling.
Includes decorator for re-raising Manila-type exceptions.
SHOULD include dedicated exception logging.
"""
import re
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
import six
import webob.exc
from manila.i18n import _
from manila.i18n import _LE
LOG = log.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Whether to make exception message format errors fatal.'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
ProcessExecutionError = processutils.ProcessExecutionError
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code=400, title="", explanation=""):
self.code = code
self.title = title
self.explanation = explanation
super(ConvertedException, self).__init__()
class Error(Exception):
pass
class ManilaException(Exception):
"""Base Manila Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, detail_data={}, **kwargs):
self.kwargs = kwargs
self.detail_data = detail_data
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
for k, v in self.kwargs.items():
if isinstance(v, Exception):
self.kwargs[k] = six.text_type(v)
if not message:
try:
message = self.message % kwargs
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation.'))
for name, value in kwargs.items():
LOG.error(_LE("%(name)s: %(value)s"), {
'name': name, 'value': value})
if CONF.fatal_exception_format_errors:
raise
else:
# at least get the core message out if something happened
message = self.message
elif isinstance(message, Exception):
message = six.text_type(message)
if re.match('.*[^\.]\.\.$', message):
message = message[:-1]
self.msg = message
super(ManilaException, self).__init__(message)
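# A minimal usage sketch (assumed): the subclasses below only override
# `message`; keyword arguments are interpolated into it when raised, e.g.
#
#   raise InvalidInput(reason="share size must be a positive integer")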
class NetworkException(ManilaException):
message = _("Exception due to network failure.")
class NetworkBindException(ManilaException):
message = _("Exception due to failed port status in binding.")
class NetworkBadConfigurationException(NetworkException):
message = _("Bad network configuration: %(reason)s.")
class BadConfigurationException(ManilaException):
message = _("Bad configuration: %(reason)s.")
class NotAuthorized(ManilaException):
message = _("Not authorized.")
code = 403
class AdminRequired(NotAuthorized):
message = _("User does not have admin privileges.")
class PolicyNotAuthorized(NotAuthorized):
message = _("Policy doesn't allow %(action)s to be performed.")
class Conflict(ManilaException):
message = _("%(err)s")
code = 409
class Invalid(ManilaException):
message = _("Unacceptable parameters.")
code = 400
class InvalidRequest(Invalid):
message = _("The request is invalid.")
class InvalidResults(Invalid):
message = _("The results are invalid.")
class InvalidInput(Invalid):
message = _("Invalid input received: %(reason)s.")
class InvalidContentType(Invalid):
message = _("Invalid content type %(content_type)s.")
class InvalidHost(Invalid):
message = _("Invalid host: %(reason)s")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
message = _("%(err)s")
class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
class InvalidDriverMode(Invalid):
message = _("Invalid driver mode: %(driver_mode)s.")
class InvalidAPIVersionString(Invalid):
message = _("API Version String %(version)s is of invalid format. Must "
"be of format MajorNum.MinorNum.")
class VersionNotFoundForAPIMethod(Invalid):
message = _("API version %(version)s is not supported on this method.")
class InvalidGlobalAPIVersion(Invalid):
message = _("Version %(req_ver)s is not supported by the API. Minimum "
"is %(min_ver)s and maximum is %(max_ver)s.")
class InvalidCapacity(Invalid):
message = _("Invalid capacity: %(name)s = %(value)s.")
class NotFound(ManilaException):
message = _("Resource could not be found.")
code = 404
safe = True
class Found(ManilaException):
message = _("Resource was found.")
code = 302
safe = True
class InUse(ManilaException):
message = _("Resource is in use.")
class AvailabilityZoneNotFound(NotFound):
message = _("Availability zone %(id)s could not be found.")
class ShareNetworkNotFound(NotFound):
message = _("Share network %(share_network_id)s could not be found.")
class ShareServerNotFound(NotFound):
message = _("Share server %(share_server_id)s could not be found.")
class ShareServerNotFoundByFilters(ShareServerNotFound):
message = _("Share server could not be found by "
"filters: %(filters_description)s.")
class ShareServerInUse(InUse):
message = _("Share server %(share_server_id)s is in use.")
class InvalidShareServer(Invalid):
message = _("Share server %(share_server_id)s is not valid.")
class ShareMigrationError(ManilaException):
message = _("Error in share migration: %(reason)s")
class ShareMigrationFailed(ManilaException):
message = _("Share migration failed: %(reason)s")
class ShareDataCopyFailed(ManilaException):
message = _("Share Data copy failed: %(reason)s")
class ShareDataCopyCancelled(ManilaException):
message = _("Copy of contents from share instance %(src_instance)s "
"to share instance %(dest_instance)s was cancelled.")
class ServiceIPNotFound(ManilaException):
message = _("Service IP for instance not found: %(reason)s")
class AdminIPNotFound(ManilaException):
message = _("Admin port IP for service instance not found: %(reason)s")
class ShareServerNotCreated(ManilaException):
message = _("Share server %(share_server_id)s failed on creation.")
class ShareServerNotReady(ManilaException):
message = _("Share server %(share_server_id)s failed to reach '%(state)s' "
"within %(time)s seconds.")
class ServiceNotFound(NotFound):
message = _("Service %(service_id)s could not be found.")
class ServiceIsDown(Invalid):
message = _("Service %(service)s is down.")
class HostNotFound(NotFound):
message = _("Host %(host)s could not be found.")
class SchedulerHostFilterNotFound(NotFound):
message = _("Scheduler host filter %(filter_name)s could not be found.")
class SchedulerHostWeigherNotFound(NotFound):
message = _("Scheduler host weigher %(weigher_name)s could not be found.")
class HostBinaryNotFound(NotFound):
message = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
message = _("Change would make usage less than 0 for the following "
"resources: %(unders)s.")
class QuotaNotFound(NotFound):
message = _("Quota could not be found.")
class QuotaExists(ManilaException):
message = _("Quota exists for project %(project_id)s, "
"resource %(resource)s.")
class QuotaResourceUnknown(QuotaNotFound):
message = _("Unknown quota resources %(unknown)s.")
class ProjectUserQuotaNotFound(QuotaNotFound):
message = _("Quota for user %(user_id)s in project %(project_id)s "
"could not be found.")
class ProjectQuotaNotFound(QuotaNotFound):
message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
message = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
message = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(ManilaException):
message = _("Quota exceeded for resources: %(overs)s.")
class MigrationNotFound(NotFound):
message = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
message = _("Migration not found for instance %(instance_id)s "
"with status %(status)s.")
class FileNotFound(NotFound):
message = _("File %(file_path)s could not be found.")
class MigrationError(ManilaException):
message = _("Migration error: %(reason)s.")
class MalformedRequestBody(ManilaException):
message = _("Malformed message body: %(reason)s.")
class ConfigNotFound(NotFound):
message = _("Could not find config at %(path)s.")
class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s.")
class NoValidHost(ManilaException):
message = _("No valid host was found. %(reason)s.")
class WillNotSchedule(ManilaException):
message = _("Host %(host)s is not up or doesn't exist.")
class QuotaError(ManilaException):
message = _("Quota exceeded: code=%(code)s.")
code = 413
headers = {'Retry-After': 0}
safe = True
class ShareSizeExceedsAvailableQuota(QuotaError):
message = _("Requested share exceeds allowed gigabytes quota.")
class SnapshotSizeExceedsAvailableQuota(QuotaError):
message = _("Requested snapshot exceeds allowed gigabytes quota.")
class ShareLimitExceeded(QuotaError):
message = _("Maximum number of shares allowed (%(allowed)d) exceeded.")
class SnapshotLimitExceeded(QuotaError):
message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded.")
class ShareNetworksLimitExceeded(QuotaError):
message = _("Maximum number of share-networks "
"allowed (%(allowed)d) exceeded.")
class GlusterfsException(ManilaException):
message = _("Unknown Gluster exception.")
class InvalidShare(Invalid):
message = _("Invalid share: %(reason)s.")
class ShareBusyException(Invalid):
message = _("Share is busy with an active task: %(reason)s.")
class InvalidShareInstance(Invalid):
message = _("Invalid share instance: %(reason)s.")
class ManageInvalidShare(InvalidShare):
message = _("Manage existing share failed due to "
"invalid share: %(reason)s")
class UnmanageInvalidShare(InvalidShare):
message = _("Unmanage existing share failed due to "
"invalid share: %(reason)s")
class PortLimitExceeded(QuotaError):
message = _("Maximum number of ports exceeded.")
class ShareAccessExists(ManilaException):
message = _("Share access %(access_type)s:%(access)s exists.")
class ShareSnapshotAccessExists(InvalidInput):
message = _("Share snapshot access %(access_type)s:%(access)s exists.")
class InvalidSnapshotAccess(Invalid):
message = _("Invalid access rule: %(reason)s")
class InvalidShareAccess(Invalid):
message = _("Invalid access rule: %(reason)s")
class InvalidShareAccessLevel(Invalid):
message = _("Invalid or unsupported share access level: %(level)s.")
class ShareBackendException(ManilaException):
message = _("Share backend error: %(msg)s.")
class ExportLocationNotFound(NotFound):
message = _("Export location %(uuid)s could not be found.")
class ShareNotFound(NotFound):
message = _("Share %(share_id)s could not be found.")
class ShareSnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
class ShareSnapshotInstanceNotFound(NotFound):
message = _("Snapshot instance %(instance_id)s could not be found.")
class ShareSnapshotNotSupported(ManilaException):
message = _("Share %(share_name)s does not support snapshots.")
class ShareGroupSnapshotNotSupported(ManilaException):
message = _("Share group %(share_group)s does not support snapshots.")
class ShareSnapshotIsBusy(ManilaException):
message = _("Deleting snapshot %(snapshot_name)s that has "
"dependent shares.")
class InvalidShareSnapshot(Invalid):
message = _("Invalid share snapshot: %(reason)s.")
class InvalidShareSnapshotInstance(Invalid):
message = _("Invalid share snapshot instance: %(reason)s.")
class ManageInvalidShareSnapshot(InvalidShareSnapshot):
message = _("Manage existing share snapshot failed due to "
"invalid share snapshot: %(reason)s.")
class UnmanageInvalidShareSnapshot(InvalidShareSnapshot):
message = _("Unmanage existing share snapshot failed due to "
"invalid share snapshot: %(reason)s.")
class ShareMetadataNotFound(NotFound):
message = _("Metadata item is not found.")
class InvalidShareMetadata(Invalid):
message = _("Invalid metadata.")
class InvalidShareMetadataSize(Invalid):
message = _("Invalid metadata size.")
class SecurityServiceNotFound(NotFound):
message = _("Security service %(security_service_id)s could not be found.")
class ShareNetworkSecurityServiceAssociationError(ManilaException):
message = _("Failed to associate share network %(share_network_id)s"
" and security service %(security_service_id)s: %(reason)s.")
class ShareNetworkSecurityServiceDissociationError(ManilaException):
message = _("Failed to dissociate share network %(share_network_id)s"
" and security service %(security_service_id)s: %(reason)s.")
class InvalidVolume(Invalid):
message = _("Invalid volume.")
class InvalidShareType(Invalid):
message = _("Invalid share type: %(reason)s.")
class InvalidShareGroupType(Invalid):
message = _("Invalid share group type: %(reason)s.")
class InvalidExtraSpec(Invalid):
message = _("Invalid extra_spec: %(reason)s.")
class VolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
class VolumeSnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
class ShareTypeNotFound(NotFound):
message = _("Share type %(share_type_id)s could not be found.")
class ShareGroupTypeNotFound(NotFound):
message = _("Share group type %(type_id)s could not be found.")
class ShareTypeAccessNotFound(NotFound):
message = _("Share type access not found for %(share_type_id)s / "
"%(project_id)s combination.")
class ShareGroupTypeAccessNotFound(NotFound):
message = _("Share group type access not found for %(type_id)s / "
"%(project_id)s combination.")
class ShareTypeNotFoundByName(ShareTypeNotFound):
message = _("Share type with name %(share_type_name)s "
"could not be found.")
class ShareGroupTypeNotFoundByName(ShareTypeNotFound):
message = _("Share group type with name %(type_name)s "
"could not be found.")
class ShareTypeExtraSpecsNotFound(NotFound):
message = _("Share Type %(share_type_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class ShareGroupTypeSpecsNotFound(NotFound):
message = _("Share group type %(type_id)s has no group specs with "
"key %(specs_key)s.")
class ShareTypeInUse(ManilaException):
message = _("Share Type %(share_type_id)s deletion is not allowed with "
"shares present with the type.")
class IPAddressInUse(InUse):
message = _("IP address %(ip)s is already used.")
class ShareGroupTypeInUse(ManilaException):
message = _("Share group Type %(type_id)s deletion is not allowed "
"with groups present with the type.")
class ShareTypeExists(ManilaException):
message = _("Share Type %(id)s already exists.")
class ShareTypeDoesNotExist(NotFound):
message = _("Share Type %(share_type)s does not exist.")
class ShareGroupTypeExists(ManilaException):
message = _("Share group type %(type_id)s already exists.")
class ShareTypeAccessExists(ManilaException):
message = _("Share type access for %(share_type_id)s / "
"%(project_id)s combination already exists.")
class ShareGroupTypeAccessExists(ManilaException):
message = _("Share group type access for %(type_id)s / "
"%(project_id)s combination already exists.")
class ShareTypeCreateFailed(ManilaException):
message = _("Cannot create share_type with "
"name %(name)s and specs %(extra_specs)s.")
class ShareGroupTypeCreateFailed(ManilaException):
message = _("Cannot create share group type with "
"name %(name)s and specs %(group_specs)s.")
class ManageExistingShareTypeMismatch(ManilaException):
message = _("Manage existing share failed due to share type mismatch: "
"%(reason)s")
class ShareExtendingError(ManilaException):
message = _("Share %(share_id)s could not be extended due to error "
"in the driver: %(reason)s")
class ShareShrinkingError(ManilaException):
message = _("Share %(share_id)s could not be shrunk due to error "
"in the driver: %(reason)s")
class ShareShrinkingPossibleDataLoss(ManilaException):
message = _("Share %(share_id)s could not be shrunk due to "
"possible data loss")
class InstanceNotFound(NotFound):
message = _("Instance %(instance_id)s could not be found.")
class BridgeDoesNotExist(ManilaException):
message = _("Bridge %(bridge)s does not exist.")
class ServiceInstanceException(ManilaException):
message = _("Exception in service instance manager occurred.")
class ServiceInstanceUnavailable(ServiceInstanceException):
message = _("Service instance is not available.")
class StorageResourceException(ManilaException):
message = _("Storage resource exception.")
class StorageResourceNotFound(StorageResourceException):
message = _("Storage resource %(name)s not found.")
code = 404
class SnapshotResourceNotFound(StorageResourceNotFound):
message = _("Snapshot %(name)s not found.")
class SnapshotUnavailable(StorageResourceException):
message = _("Snapshot %(name)s info not available.")
class NetAppException(ManilaException):
message = _("Exception due to NetApp failure.")
class VserverNotFound(NetAppException):
message = _("Vserver %(vserver)s not found.")
class VserverNotSpecified(NetAppException):
message = _("Vserver not specified.")
class EMCVmaxXMLAPIError(Invalid):
message = _("%(err)s")
class EMCVmaxLockRequiredException(ManilaException):
message = _("Unable to acquire lock(s).")
class EMCVmaxInvalidMoverID(ManilaException):
message = _("Invalid mover or vdm %(id)s.")
class EMCVnxXMLAPIError(Invalid):
message = _("%(err)s")
class EMCVnxLockRequiredException(ManilaException):
message = _("Unable to acquire lock(s).")
class EMCVnxInvalidMoverID(ManilaException):
message = _("Invalid mover or vdm %(id)s.")
class EMCUnityError(ShareBackendException):
message = _("%(err)s")
class HPE3ParInvalidClient(Invalid):
message = _("%(err)s")
class HPE3ParInvalid(Invalid):
message = _("%(err)s")
class HPE3ParUnexpectedError(ManilaException):
message = _("%(err)s")
class GPFSException(ManilaException):
message = _("GPFS exception occurred.")
class GPFSGaneshaException(ManilaException):
message = _("GPFS Ganesha exception occurred.")
class GaneshaCommandFailure(ProcessExecutionError):
_description = _("Ganesha management command failed.")
def __init__(self, **kw):
if 'description' not in kw:
kw['description'] = self._description
super(GaneshaCommandFailure, self).__init__(**kw)
class InvalidSqliteDB(Invalid):
message = _("Invalid Sqlite database.")
class SSHException(ManilaException):
message = _("Exception in SSH protocol negotiation or logic.")
class HDFSException(ManilaException):
message = _("HDFS exception occurred!")
class MapRFSException(ManilaException):
message = _("MapRFS exception occurred: %(msg)s")
class ZFSonLinuxException(ManilaException):
message = _("ZFSonLinux exception occurred: %(msg)s")
class QBException(ManilaException):
message = _("Quobyte exception occurred: %(msg)s")
class QBRpcException(ManilaException):
"""Quobyte backend specific exception."""
message = _("Quobyte JsonRpc call to backend raised "
"an exception: %(result)s, Quobyte error"
" code %(qbcode)s")
class SSHInjectionThreat(ManilaException):
message = _("SSH command injection detected: %(command)s")
class HNASBackendException(ManilaException):
message = _("HNAS Backend Exception: %(msg)s")
class HNASConnException(ManilaException):
message = _("HNAS Connection Exception: %(msg)s")
class HNASSSCIsBusy(ManilaException):
message = _("HNAS SSC is busy and cannot execute the command: %(msg)s")
class HNASSSCContextChange(ManilaException):
message = _("HNAS SSC Context has been changed unexpectedly: %(msg)s")
class HNASDirectoryNotEmpty(ManilaException):
message = _("HNAS Directory is not empty: %(msg)s")
class HNASItemNotFoundException(StorageResourceNotFound):
message = _("HNAS Item Not Found Exception: %(msg)s")
class HNASNothingToCloneException(ManilaException):
message = _("HNAS Nothing To Clone Exception: %(msg)s")
# ShareGroup
class ShareGroupNotFound(NotFound):
message = _("Share group %(share_group_id)s could not be found.")
class ShareGroupSnapshotNotFound(NotFound):
message = _(
"Share group snapshot %(share_group_snapshot_id)s could not be found.")
class ShareGroupSnapshotMemberNotFound(NotFound):
message = _("Share group snapshot member %(member_id)s could not be "
"found.")
class InvalidShareGroup(Invalid):
message = _("Invalid share group: %(reason)s")
class InvalidShareGroupSnapshot(Invalid):
message = _("Invalid share group snapshot: %(reason)s")
class DriverNotInitialized(ManilaException):
message = _("Share driver '%(driver)s' not initialized.")
class ShareResourceNotFound(StorageResourceNotFound):
message = _("Share id %(share_id)s could not be found "
"in storage backend.")
class ShareUmountException(ManilaException):
message = _("Failed to unmount share: %(reason)s")
class ShareMountException(ManilaException):
message = _("Failed to mount share: %(reason)s")
class ShareCopyDataException(ManilaException):
message = _("Failed to copy data: %(reason)s")
# Replication
class ReplicationException(ManilaException):
message = _("Unable to perform a replication action: %(reason)s.")
class ShareReplicaNotFound(NotFound):
message = _("Share Replica %(replica_id)s could not be found.")
# Tegile Storage drivers
class TegileAPIException(ShareBackendException):
message = _("Unexpected response from Tegile IntelliFlash API: "
"%(response)s")
class StorageCommunicationException(ShareBackendException):
message = _("Could not communicate with storage array.")
class EvaluatorParseException(ManilaException):
message = _("Error during evaluator parsing: %(reason)s")
# Hitachi Scaleout Platform driver
class HSPBackendException(ShareBackendException):
message = _("HSP Backend Exception: %(msg)s")
class HSPTimeoutException(ShareBackendException):
message = _("HSP Timeout Exception: %(msg)s")
class HSPItemNotFoundException(ShareBackendException):
message = _("HSP Item Not Found Exception: %(msg)s")
class NexentaException(ShareBackendException):
message = _("Exception due to Nexenta failure. %(reason)s")
# Tooz locking
class LockCreationFailed(ManilaException):
message = _('Unable to create lock. Coordination backend not started.')
class LockingFailed(ManilaException):
message = _('Lock acquisition failed.')
|
apache-2.0
|
nhicher/ansible
|
lib/ansible/utils/module_docs_fragments/k8s_name_options.py
|
80
|
2450
|
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Options for selecting or identifying a specific K8s object
class ModuleDocFragment(object):
DOCUMENTATION = '''
options:
api_version:
description:
- Use to specify the API version. Use to create, delete, or discover an object without providing a full
resource definition. Use in conjunction with I(kind), I(name), and I(namespace) to identify a
specific object. If I(resource definition) is provided, the I(apiVersion) from the I(resource_definition)
will override this option.
default: v1
aliases:
- api
- version
kind:
description:
- Use to specify an object model. Use to create, delete, or discover an object without providing a full
resource definition. Use in conjunction with I(api_version), I(name), and I(namespace) to identify a
specific object. If I(resource definition) is provided, the I(kind) from the I(resource_definition)
will override this option.
name:
description:
- Use to specify an object name. Use to create, delete, or discover an object without providing a full
resource definition. Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a
specific object. If I(resource definition) is provided, the I(metadata.name) value from the
I(resource_definition) will override this option.
namespace:
description:
- Use to specify an object namespace. Useful when creating, deleting, or discovering an object without
providing a full resource definition. Use in conjunction with I(api_version), I(kind), and I(name)
      to identify a specific object. If I(resource definition) is provided, the I(metadata.namespace) value
from the I(resource_definition) will override this option.
'''
|
gpl-3.0
|
lshain-android-source/external-chromium_org
|
remoting/tools/verify_resources.py
|
23
|
5013
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Verifies that GRD resource files define all the strings used by a given
set of source files. For file formats where it is not possible to infer which
strings represent message identifiers, localized strings should be explicitly
annotated with the string "i18n-content", for example:
LocalizeString(/*i18n-content*/"PRODUCT_NAME");
This script also recognises localized strings in HTML and manifest.json files:
HTML: <span i18n-content="PRODUCT_NAME"></span>
or ...i18n-value-name-1="BUTTON_NAME"...
manifest.json: __MSG_PRODUCT_NAME__
Note that these forms must be exact; extra spaces are not permitted, though
either single or double quotes are recognized.
In addition, the script checks that all the messages are still in use; if
this is not the case then a warning is issued, but the script still succeeds.
"""
import json
import os
import optparse
import re
import sys
import xml.dom.minidom as minidom
WARNING_MESSAGE = """
To remove this warning, either remove the unused tags from
resource files, add the files that use the tags listed above to
remoting.gyp, or annotate existing uses of those tags with the
prefix /*i18n-content*/
"""
def LoadTagsFromGrd(filename):
xml = minidom.parse(filename)
tags = []
msgs_and_structs = xml.getElementsByTagName("message")
msgs_and_structs.extend(xml.getElementsByTagName("structure"))
for res in msgs_and_structs:
name = res.getAttribute("name")
if not name or not name.startswith("IDR_"):
raise Exception("Tag name doesn't start with IDR_: %s" % name)
tags.append(name[4:])
return tags
def ExtractTagFromLine(file_type, line):
"""Extract a tag from a line of HTML, C++, JS or JSON."""
if file_type == "html":
# HTML-style (tags)
m = re.search('i18n-content=[\'"]([^\'"]*)[\'"]', line)
if m: return m.group(1)
# HTML-style (substitutions)
m = re.search('i18n-value-name-[1-9]=[\'"]([^\'"]*)[\'"]', line)
if m: return m.group(1)
elif file_type == 'js':
# Javascript style
    m = re.search('/\*i18n-content\*/[\'"]([^\'"]*)[\'"]', line)
if m: return m.group(1)
elif file_type == 'cc' or file_type == 'mm':
# C++ style
m = re.search('IDR_([A-Z0-9_]*)', line)
if m: return m.group(1)
    m = re.search('/\*i18n-content\*/["]([^\'"]*)["]', line)
if m: return m.group(1)
elif file_type == 'json':
# Manifest style
m = re.search('__MSG_(.*)__', line)
if m: return m.group(1)
elif file_type == 'jinja2':
# Jinja2 template file
m = re.search('\{\%\s+trans\s+\%\}([A-Z0-9_]+)\{\%\s+endtrans\s+\%\}', line)
if m: return m.group(1)
return None
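# A minimal usage sketch (assumed): tags are extracted according to file type.
#
#   ExtractTagFromLine('html', '<span i18n-content="PRODUCT_NAME"></span>')
#   # -> 'PRODUCT_NAME'
#   ExtractTagFromLine('json', '"name": "__MSG_PRODUCT_NAME__",')
#   # -> 'PRODUCT_NAME'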
def VerifyFile(filename, messages, used_tags):
"""
  Parse |filename|, looking for tags, and report any that are not included in
|messages|. Return True if all tags are present and correct, or False if
any are missing. If no tags are found, print a warning message and return
True.
"""
base_name, extension = os.path.splitext(filename)
extension = extension[1:]
if extension not in ['js', 'cc', 'html', 'json', 'jinja2', 'mm']:
raise Exception("Unknown file type: %s" % extension)
result = True
matches = False
f = open(filename, 'r')
lines = f.readlines()
for i in xrange(0, len(lines)):
tag = ExtractTagFromLine(extension, lines[i])
if tag:
tag = tag.upper()
used_tags.add(tag)
matches = True
if not tag in messages:
result = False
print '%s/%s:%d: error: Undefined tag: %s' % \
(os.getcwd(), filename, i + 1, tag)
if not matches:
print '%s/%s:0: warning: No tags found' % (os.getcwd(), filename)
f.close()
return result
def main():
parser = optparse.OptionParser(
usage='Usage: %prog [options...] [source_file...]')
parser.add_option('-t', '--touch', dest='touch',
help='File to touch when finished.')
parser.add_option('-r', '--grd', dest='grd', action='append',
help='grd file')
options, args = parser.parse_args()
if not options.touch:
print '-t is not specified.'
return 1
if len(options.grd) == 0 or len(args) == 0:
print 'At least one GRD file needs to be specified.'
return 1
resources = []
for f in options.grd:
resources.extend(LoadTagsFromGrd(f))
used_tags = set([])
exit_code = 0
for f in args:
if not VerifyFile(f, resources, used_tags):
exit_code = 1
warnings = False
for tag in resources:
if tag not in used_tags:
print ('%s/%s:0: warning: %s is defined but not used') % \
(os.getcwd(), sys.argv[2], tag)
warnings = True
if warnings:
print WARNING_MESSAGE
if exit_code == 0:
f = open(options.touch, 'a')
f.close()
os.utime(options.touch, None)
return exit_code
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
|
aeph6Ee0/youtube-dl
|
youtube_dl/extractor/iconosquare.py
|
110
|
2981
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
get_element_by_id,
remove_end,
)
class IconosquareIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:iconosquare\.com|statigr\.am)/p/(?P<id>[^/]+)'
_TEST = {
'url': 'http://statigr.am/p/522207370455279102_24101272',
'md5': '6eb93b882a3ded7c378ee1d6884b1814',
'info_dict': {
'id': '522207370455279102_24101272',
'ext': 'mp4',
'title': 'Instagram photo by @aguynamedpatrick (Patrick Janelle)',
'description': 'md5:644406a9ec27457ed7aa7a9ebcd4ce3d',
'timestamp': 1376471991,
'upload_date': '20130814',
'uploader': 'aguynamedpatrick',
'uploader_id': '24101272',
'comment_count': int,
'like_count': int,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
media = self._parse_json(
get_element_by_id('mediaJson', webpage),
video_id)
formats = [{
'url': f['url'],
'format_id': format_id,
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height'))
} for format_id, f in media['videos'].items()]
self._sort_formats(formats)
title = remove_end(self._og_search_title(webpage), ' - via Iconosquare')
timestamp = int_or_none(media.get('created_time') or media.get('caption', {}).get('created_time'))
description = media.get('caption', {}).get('text')
uploader = media.get('user', {}).get('username')
uploader_id = media.get('user', {}).get('id')
comment_count = int_or_none(media.get('comments', {}).get('count'))
like_count = int_or_none(media.get('likes', {}).get('count'))
thumbnails = [{
'url': t['url'],
'id': thumbnail_id,
'width': int_or_none(t.get('width')),
'height': int_or_none(t.get('height'))
} for thumbnail_id, t in media.get('images', {}).items()]
comments = [{
'id': comment.get('id'),
'text': comment['text'],
'timestamp': int_or_none(comment.get('created_time')),
'author': comment.get('from', {}).get('full_name'),
'author_id': comment.get('from', {}).get('username'),
} for comment in media.get('comments', {}).get('data', []) if 'text' in comment]
return {
'id': video_id,
'title': title,
'description': description,
'thumbnails': thumbnails,
'timestamp': timestamp,
'uploader': uploader,
'uploader_id': uploader_id,
'comment_count': comment_count,
'like_count': like_count,
'formats': formats,
'comments': comments,
}
|
unlicense
|
anirudhvenkats/clowdflows
|
workflows/migrations/0033_auto__add_field_input_order__add_field_output_order.py
|
6
|
16613
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Input.order'
db.add_column('workflows_input', 'order',
self.gf('django.db.models.fields.PositiveIntegerField')(default=1),
keep_default=False)
# Adding field 'Output.order'
db.add_column('workflows_output', 'order',
self.gf('django.db.models.fields.PositiveIntegerField')(default=1),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Input.order'
db.delete_column('workflows_input', 'order')
# Deleting field 'Output.order'
db.delete_column('workflows_output', 'order')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'workflows.abstractinput': {
'Meta': {'ordering': "('order',)", 'object_name': 'AbstractInput'},
'default': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multi': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'parameter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inputs'", 'to': "orm['workflows.AbstractWidget']"})
},
'workflows.abstractoption': {
'Meta': {'ordering': "['name']", 'object_name': 'AbstractOption'},
'abstract_input': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['workflows.AbstractInput']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'workflows.abstractoutput': {
'Meta': {'ordering': "('order',)", 'object_name': 'AbstractOutput'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'outputs'", 'to': "orm['workflows.AbstractWidget']"})
},
'workflows.abstractwidget': {
'Meta': {'ordering': "('order', 'name')", 'object_name': 'AbstractWidget'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'widgets'", 'to': "orm['workflows.Category']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'has_progress_bar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('workflows.thumbs.ThumbnailField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'interaction_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'interactive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'post_interact_action': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'treeview_image': ('workflows.thumbs.ThumbnailField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'widgets'", 'null': 'True', 'to': "orm['auth.User']"}),
'visualization_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'wsdl': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'wsdl_method': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'workflows.category': {
'Meta': {'ordering': "('order', 'name')", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['workflows.Category']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'categories'", 'null': 'True', 'to': "orm['auth.User']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'categories'", 'null': 'True', 'to': "orm['workflows.Workflow']"})
},
'workflows.connection': {
'Meta': {'object_name': 'Connection'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connections'", 'to': "orm['workflows.Input']"}),
'output': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connections'", 'to': "orm['workflows.Output']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connections'", 'to': "orm['workflows.Workflow']"})
},
'workflows.input': {
'Meta': {'ordering': "('order',)", 'object_name': 'Input'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inner_output': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'outer_input_rel'", 'null': 'True', 'to': "orm['workflows.Output']"}),
'multi_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'outer_output': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inner_input_rel'", 'null': 'True', 'to': "orm['workflows.Output']"}),
'parameter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'value': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inputs'", 'to': "orm['workflows.Widget']"})
},
'workflows.option': {
'Meta': {'ordering': "['name']", 'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['workflows.Input']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'workflows.output': {
'Meta': {'ordering': "('order',)", 'object_name': 'Output'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inner_input': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'outer_output_rel'", 'null': 'True', 'to': "orm['workflows.Input']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'outer_input': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inner_output_rel'", 'null': 'True', 'to': "orm['workflows.Input']"}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'value': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'outputs'", 'to': "orm['workflows.Widget']"})
},
'workflows.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'active_workflow': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['workflows.Workflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'workflows.widget': {
'Meta': {'object_name': 'Widget'},
'abstract_widget': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'instances'", 'null': 'True', 'to': "orm['workflows.AbstractWidget']"}),
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interaction_waiting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'progress': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'regular'", 'max_length': '50'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'widgets'", 'to': "orm['workflows.Workflow']"}),
'x': ('django.db.models.fields.IntegerField', [], {}),
'y': ('django.db.models.fields.IntegerField', [], {})
},
'workflows.workflow': {
'Meta': {'ordering': "['name']", 'object_name': 'Workflow'},
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Untitled workflow'", 'max_length': '200'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'template_parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['workflows.Workflow']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflows'", 'to': "orm['auth.User']"}),
'widget': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'workflow_link'", 'unique': 'True', 'null': 'True', 'to': "orm['workflows.Widget']"})
}
}
complete_apps = ['workflows']
|
gpl-3.0
|
thuswa/subdms
|
subdms/frontend.py
|
1
|
17223
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id$
# Last modified Wed Jul 7 20:47:07 2010 on stalker
# update count: 1192
#
# subdms - A document management system based on subversion.
# Copyright (C) 2009 Albert Thuswaldner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# from . import integration # Python 3.X
# from . import lowlevel # Python 3.X
import integration
import lowlevel
""" Front-end classes. """
class project:
def __init__(self):
""" Initialize project class """
self.conf = lowlevel.config()
self.link = lowlevel.linkname()
self.svncmd = lowlevel.svncmd()
def createcategory(self, category):
""" Create category dir in repo. """
self.svncmd.mkdir(self.link.const_caturl(category), "Created a category")
def createproject(self, category, project, projname, doctypes):
""" Create project dir in repo. """
self.svncmd.mkdir(self.link.const_projurl(category, project), \
self.conf.newproj+projname)
self.adddoctypes(category, project, doctypes)
def adddoctypes(self, category, project, doctypes):
""" Add new doctypes. """
for doc in doctypes:
self.svncmd.mkdir(self.link.const_doctypeurl(category, project, doc), \
self.conf.newdoctype+"Added doctype")
################################################################################
class document:
def __init__(self):
""" Initialize document class """
self.cmd = lowlevel.command()
self.conf = lowlevel.config()
self.integ = integration.docinteg()
self.link = lowlevel.linkname()
self.state = docstate()
self.status = docstatus()
self.svncmd = lowlevel.svncmd()
def createdocument(self, createfromurl, docnamelist, doctitle, dockeywords):
"""
Create a document
createfromurl: link in repository to the template or document that
this new document should be based on.
docnamelist: list containing the building blocks of the document name
        doctitle: document title string.
        dockeywords: document keywords.
        """
docurl=self.link.const_docurl(docnamelist)
docfileurl=self.link.const_docfileurl(docnamelist)
checkoutpath=self.link.const_checkoutpath(docnamelist)
docpath=self.link.const_docpath(docnamelist)
# Create document url in repository
self.svncmd.mkdir(docurl, "Document directory created.")
# Create document from template or existing document
self.svncmd.server_side_copy(createfromurl, docfileurl, \
"Document created")
self.checkout(docnamelist)
# Document integration
if self.integ.dodocinteg(docnamelist):
self.integ.setallfields(docnamelist, doctitle, dockeywords, \
self.getauthor(checkoutpath), \
self.conf.statuslist[0])
# Set document title and commit document
self.settitle(docpath, doctitle)
self.setkeywords(docpath, dockeywords)
self.status.setpreliminary(docpath)
self.svncmd.checkin(docpath, self.conf.newdoc+ \
"Commited document properties")
def adddocument(self, addfilepath, docnamelist, doctitle, dockeywords):
""" Add an existing document. """
docname=self.link.const_docfname(docnamelist)
docurl=self.link.const_docurl(docnamelist)
checkoutpath=self.link.const_checkoutpath(docnamelist)
docpath=self.link.const_docpath(docnamelist)
# Create document url in repository and check it out to workspace
self.svncmd.mkdir(docurl, "Document directory created.")
self.svncmd.checkout(docurl, checkoutpath)
# Copy file to workspace
self.cmd.copyfile(addfilepath, docpath)
self.svncmd.add(docpath)
# Document integration
if self.integ.dodocinteg(docnamelist):
self.integ.setallfields(docnamelist, doctitle, dockeywords, \
self.getauthor(checkoutpath), \
self.conf.statuslist[0])
# Set document title and commit document
self.settitle(docpath, doctitle)
self.setkeywords(docpath, dockeywords)
self.setsvnkeywords(docpath)
self.status.setpreliminary(docpath)
self.svncmd.checkin(docpath, self.conf.newdoc+ \
"Commited document properties.")
def addviewcopy(self, addvcpath, docnamelist):
""" Add a view-copy to an existing document. """
wascheckedout = True
docpath=self.link.const_docpath(docnamelist)
vcpath=self.link.const_viewcopyfilepath(docnamelist)
if not self.state.ischeckedout(docnamelist):
self.checkout(docnamelist)
wascheckedout = False
# Copy file to workspace
self.cmd.copyfile(addvcpath, vcpath)
self.svncmd.add(vcpath)
self.svncmd.checkin(vcpath, self.conf.newdoc+ \
"Commited view-copy")
if not wascheckedout:
self.checkin(docnamelist)
def commit(self, docnamelist, message):
""" Commit changes on file. """
self.svncmd.checkin(self.link.const_docpath(docnamelist), message)
def checkin(self, docnamelist):
""" Check-in file from workspace. """
docname = self.link.const_docname(docnamelist)
message = "Checking in: "+docname
self.commit(docnamelist, "Checked-in")
# Remove file from workspace
self.cmd.rmtree(self.link.const_checkoutpath(docnamelist))
return message
def checkout(self, docnamelist):
""" Check-out file to workspace. """
self.svncmd.checkout(self.link.const_docurl(docnamelist), \
self.link.const_checkoutpath(docnamelist))
# self.client.lock( 'file.txt', 'reason for locking' )
def export(self, docnamelist):
""" Export file to workspace. """
checkoutpath = self.link.const_readonlypath(docnamelist)
docpath = self.link.const_readonlyfilepath(docnamelist)
# self.cmd.rmtree(checkoutpath)
self.svncmd.export(self.link.const_docurl(docnamelist), checkoutpath)
self.cmd.setreadonly(docpath)
def release(self, docnamelist):
""" Release the document. """
current_issue = self.getissueno(docnamelist)
docname = self.link.const_docname(docnamelist)
message = "Release "+docname
if not self.state.ischeckedout(docnamelist):
self.checkout(docnamelist)
# Document integration
if self.integ.dodocinteg(docnamelist):
self.integ.releaseupdate(docnamelist)
# Set status of document to released
self.status.setreleased(self.link.const_docpath(docnamelist))
self.commit(docnamelist, self.conf.release+"Status set to released")
# Remove file from workspace
self.cmd.rmtree(self.link.const_checkoutpath(docnamelist))
# Set previous issue to obsolete
if current_issue > 1:
old_issue = str(current_issue - 1)
old_docnamelist = self.setissueno(docnamelist, old_issue)
self.obsolete(old_docnamelist)
return message
def obsolete(self, docnamelist):
""" Obsolete the document. """
docpath = self.link.const_docpath(docnamelist)
docurl = self.link.const_docurl(docnamelist)
self.svncmd.checkout(docurl, self.link.const_checkoutpath(docnamelist))
self.status.setobsolete(docpath)
message = "Status set to obsolete"
# Document integration
if self.integ.dodocinteg(docnamelist):
self.integ.obsoleteupdate(docnamelist)
self.commit(docnamelist, self.conf.obsolete+"Status set to obsolete")
# Remove file from workspace
self.cmd.rmtree(self.link.const_checkoutpath(docnamelist))
return message
def editdocument(self, docnamelist):
""" Edit the document. """
if not self.state.ischeckedout(docnamelist):
self.checkout(docnamelist)
self.cmd.launch_editor(docnamelist)
def viewdocument(self, docnamelist):
""" View the document. """
self.export(docnamelist)
self.cmd.launch_viewer(docnamelist)
def newissue(self, docnamelist):
""" Create new issue of the document. """
new_issue = str(self.getissueno(docnamelist) + 1)
new_docnamelist = self.setissueno(docnamelist, new_issue)
docname = self.link.const_docname(new_docnamelist)
docurl=self.link.const_docurl(new_docnamelist)
docfileurl=self.link.const_docfileurl(new_docnamelist)
docpath = self.link.const_docpath(new_docnamelist)
checkoutpath=self.link.const_checkoutpath(new_docnamelist)
message = "New issue created."
# Create document url in repository
self.svncmd.mkdir(docurl, "Document directory created")
# Copy issue to new issue
self.svncmd.server_side_copy(self.link.const_docfileurl(docnamelist), \
docfileurl, message)
self.svncmd.checkout(docurl, checkoutpath)
# Document integration
if self.integ.dodocinteg(new_docnamelist):
self.integ.setallfields(new_docnamelist, \
self.gettitle(new_docnamelist), \
self.getkeywords(new_docnamelist), \
self.getauthor(checkoutpath), \
self.conf.statuslist[0])
# Set document status and commit document
self.status.setpreliminary(docpath)
self.svncmd.checkin(docpath, self.conf.newdoc+\
"Commited document properties")
return message
def changetitle(self, docnamelist, doctitle):
""" Change document title. """
wascheckedout = True
docpath = self.link.const_docpath(docnamelist)
if not self.state.ischeckedout(docnamelist):
self.checkout(docnamelist)
wascheckedout = False
# Document integration
if self.integ.dodocinteg(docnamelist):
self.integ.updatetitle(docnamelist, doctitle)
# Set document title and commit document
self.settitle(docpath, doctitle)
self.svncmd.checkin(docpath, self.conf.newtitle+ \
"Changed document title")
if not wascheckedout:
self.checkin(docnamelist)
def changekeywords(self, docnamelist, dockeywords):
""" Change document keywords. """
wascheckedout = True
docpath = self.link.const_docpath(docnamelist)
if not self.state.ischeckedout(docnamelist):
self.checkout(docnamelist)
wascheckedout = False
# Document integration
if self.integ.dodocinteg(docnamelist):
self.integ.updatekeywords(docnamelist, dockeywords)
# Set document keywords and commit document
self.setkeywords(docpath, dockeywords)
self.svncmd.checkin(docpath, self.conf.newkeywords+ \
"Changed document keywords")
if not wascheckedout:
self.checkin(docnamelist)
def getauthor(self, path):
""" Get commit author. """
return self.svncmd.info(path).commit_author
def getdate(self, path):
""" Get commit date. """
return self.svncmd.info(path).commit_time
def getissueno(self, docnamelist):
""" Get document issue number. """
return int(docnamelist[4])
def setissueno(self, docnamelist, issue_no):
""" Set document issue number. """
returnlist = docnamelist[:4]
returnlist.extend([issue_no, docnamelist[5]])
return returnlist
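    # Illustrative sketch (hypothetical document name parts, not taken from a real
    # subdms repository): the issue number lives at index 4 of docnamelist, so e.g.
    #   setissueno(['cat', 'proj', 'doctype', '001', '1', 'txt'], '2')
    # would return ['cat', 'proj', 'doctype', '001', '2', 'txt'].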
def gettitle(self, docnamelist):
""" Get document title. """
return self.svncmd.propget(self.conf.proplist[0], \
self.link.const_docurl(docnamelist))
def getkeywords(self, docnamelist):
""" Get document keywords. """
return self.svncmd.propget(self.conf.proplist[3], \
self.link.const_docurl(docnamelist))
def settitle(self, docpath, doctitle):
""" Set document title. """
self.svncmd.propset(self.conf.proplist[0], doctitle, docpath)
def setsvnkeywords(self, docpath):
""" Set svn keywords. """
self.svncmd.propset(self.conf.proplist[2], self.conf.svnkeywords, \
docpath)
def setkeywords(self, docpath, dockeywords):
""" Set document keywords. """
self.svncmd.propset(self.conf.proplist[3], dockeywords, docpath)
def reverttohead(self, docnamelist):
""" Revert to head revision undo local changes. """
if self.state.ischeckedout(docnamelist):
if self.state.ismodified(docnamelist):
self.svncmd.revert(self.link.const_docpath(docnamelist))
def undocheckout(self, docnamelist):
""" undo checkout. """
# Remove file from workspace
self.cmd.rmtree(self.link.const_checkoutpath(docnamelist))
def reverttoprerev(self, docnamelist):
""" Revert to previous revision. """
return None #fixme
################################################################################
class docstatus:
def __init__(self):
""" Initialize document status class. """
self.conf = lowlevel.config()
self.link = lowlevel.linkname()
self.svncmd = lowlevel.svncmd()
def getstatus(self, docnamelist):
""" Get document status. """
return self.svncmd.propget(self.conf.proplist[1],
self.link.const_docurl(docnamelist))
def setpreliminary(self, docpath):
""" Set document status to preliminary. """
self.svncmd.propset(self.conf.proplist[1], \
self.conf.statuslist[0], docpath)
def setreleased(self, docpath):
""" Set document status to released. """
self.svncmd.propset(self.conf.proplist[1], self.conf.statuslist[4], \
docpath)
def setobsolete(self, docpath):
""" Set document status to obsolete. """
self.svncmd.propset(self.conf.proplist[1], self.conf.statuslist[5], \
docpath)
def ispreliminary(self, docnamelist):
""" Return true if document is preliminary. """
if self.getstatus(docnamelist) == self.conf.statuslist[0]:
return True
else:
return False
def isreleased(self, docnamelist):
""" Return true if document is released. """
if self.getstatus(docnamelist) == self.conf.statuslist[4]:
return True
else:
return False
def isnotreleased(self, docnamelist):
""" Return true if document is not released. """
return not self.isreleased(docnamelist)
def isobsolete(self, docnamelist):
""" Return true if document is obsolete. """
if self.getstatus(docnamelist) == self.conf.statuslist[5]:
return True
else:
return False
def isreadonly(self, docnamelist):
""" Return true if document status implies read-only. """
if self.isreleased(docnamelist) or self.isobsolete(docnamelist):
return True
else:
return False
################################################################################
class docstate:
def __init__(self):
""" Initialize document state class. """
self.cmd = lowlevel.command()
#self.conf = lowlevel.config()
self.link = lowlevel.linkname()
self.svncmd = lowlevel.svncmd()
def ischeckedout(self, docnamelist):
""" Return true if docname is checked out. """
return self.cmd.workingcopyexists(docnamelist)
def ismodified(self, docnamelist):
""" Return true if docname is modified. """
docpath = self.link.const_docpath(docnamelist)
state = self.svncmd.status(docpath)
if state.text_status == self.svncmd.modified:
return True
else:
return False
def isconflicted(self, docnamelist):
""" Return true if docname is conflicted. """
docpath = self.link.const_docpath(docnamelist)
state = self.svncmd.status(docpath)
if state.text_status == self.svncmd.conflicted:
return True
else:
return False
def getstate(self, docnamelist):
""" Get document state. """
if self.ischeckedout(docnamelist):
return_state = ['O', 'Checked Out']
if self.ismodified(docnamelist):
return_state = ['M', 'Modified']
elif self.isconflicted(docnamelist):
return_state = ['C', 'Conflict']
else:
return_state = ['I', 'Checked In']
return return_state
|
gpl-3.0
|
mhamed/wordtosay
|
sites/all/libraries/fckeditor/editor/filemanager/connectors/py/config.py
|
3
|
7087
|
#!/usr/bin/env python
"""
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2010 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Configuration file for the File Manager Connector for Python
"""
# INSTALLATION NOTE: You must set up your server environment accordingly to run
# python scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
# * WSGI (recommended): You'll need apache + mod_python + modpython_gateway
# or any web server capable of the WSGI python standard
# * Plain Old CGI: Any server capable of running standard python scripts
# (although mod_python is recommended for performance)
# This was the previous connector version operation mode
#
# If you're using the Apache web server, rename the htaccess.txt file to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this
# directory.
# SECURITY: You must explicitly enable this "connector". (Set it to "True").
# WARNING: don't just set "ConfigIsEnabled = True", you must be sure that only
# authenticated users can access this file or use some kind of session checking.
Enabled = False
# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'
# Fill in the following value if you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''
# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled.
ForceSingleExtension = True
# What the user can do with this connector
ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ]
# Allowed Resource Types
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']
# After a file is uploaded, it is sometimes necessary to change its permissions
# so that it is possible to access it at a later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
ChmodOnUpload = 0755
# See comments above.
# Used when creating folders that do not exist.
ChmodOnFolderCreate = 0755
# Do not touch these 3 lines; see "Configuration settings for each Resource Type"
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};
# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
# If it is empty then any file type can be uploaded.
# - DeniedExtensions: The extensions that won't be allowed.
# If it is empty then no restrictions are done here.
#
# For a file to be uploaded it has to fulfill both the AllowedExtensions
# and DeniedExtensions (that is: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
# these resources will be located.
# Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'FileTypesPath' must point to the same directory.
# Attention: It must end with a slash: '/'
#
#
# - QuickUploadPath: the virtual folder relative to the document root where
# these resources will be uploaded using the Upload tab in the resources
# dialogs.
# Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'QuickUploadPath' must point to the same directory.
# Attention: It must end with a slash: '/'
# WARNING: It is recommended to remove swf extension from the list of allowed extensions.
# SWF files can be used to perform XSS attack.
AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
FileTypesAbsolutePath['File'] = (not UserFilesAbsolutePath == '') and (UserFilesAbsolutePath + 'file/') or ''
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']
AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
FileTypesAbsolutePath['Image'] = (not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'image/' or ''
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image']= FileTypesAbsolutePath['Image']
AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
FileTypesAbsolutePath['Flash'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'flash/' or ''
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash']= FileTypesAbsolutePath['Flash']
AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla', 'flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
FileTypesAbsolutePath['Media'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'media/' or ''
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media']= FileTypesAbsolutePath['Media']
|
gpl-2.0
|
adrian-ionescu/apache-spark
|
examples/src/main/python/mllib/multi_label_metrics_example.py
|
158
|
2277
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $example on$
from pyspark.mllib.evaluation import MultilabelMetrics
# $example off$
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext(appName="MultiLabelMetricsExample")
# $example on$
scoreAndLabels = sc.parallelize([
([0.0, 1.0], [0.0, 2.0]),
([0.0, 2.0], [0.0, 1.0]),
([], [0.0]),
([2.0], [2.0]),
([2.0, 0.0], [2.0, 0.0]),
([0.0, 1.0, 2.0], [0.0, 1.0]),
([1.0], [1.0, 2.0])])
# Instantiate metrics object
metrics = MultilabelMetrics(scoreAndLabels)
# Summary stats
print("Recall = %s" % metrics.recall())
print("Precision = %s" % metrics.precision())
print("F1 measure = %s" % metrics.f1Measure())
print("Accuracy = %s" % metrics.accuracy)
# Individual label stats
labels = scoreAndLabels.flatMap(lambda x: x[1]).distinct().collect()
for label in labels:
print("Class %s precision = %s" % (label, metrics.precision(label)))
print("Class %s recall = %s" % (label, metrics.recall(label)))
print("Class %s F1 Measure = %s" % (label, metrics.f1Measure(label)))
# Micro stats
print("Micro precision = %s" % metrics.microPrecision)
print("Micro recall = %s" % metrics.microRecall)
print("Micro F1 measure = %s" % metrics.microF1Measure)
# Hamming loss
print("Hamming loss = %s" % metrics.hammingLoss)
# Subset accuracy
print("Subset accuracy = %s" % metrics.subsetAccuracy)
# $example off$
|
apache-2.0
|
steigr/redisrpc
|
python/examples/client.py
|
2
|
1175
|
#!/usr/bin/env python
import argparse
import logging
import traceback
import sys
import redis
# Allow this script to run without installing redisrpc.
sys.path.append('..')
import redisrpc
import calc
# Direct all RedisRPC logging messages to stderr.
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
parser = argparse.ArgumentParser(description='Example calculator client')
parser.add_argument('--transport', choices=('json', 'pickle'), default='json',
help='data encoding used for transport')
args = parser.parse_args()
def do_calculations(calculator):
calculator.clr()
calculator.add(5)
calculator.sub(3)
calculator.mul(4)
calculator.div(2)
assert calculator.val() == 4
try:
calculator.missing_method()
assert False
except (AttributeError, redisrpc.RemoteException):
pass
# 1. Local object
calculator = calc.Calculator()
do_calculations(calculator)
# 2. Remote object, should act like local object
redis_server = redis.Redis()
message_queue = 'calc'
calculator = redisrpc.Client(redis_server, message_queue, timeout=1, transport=args.transport)
do_calculations(calculator)
print('success!')
|
gpl-3.0
|
sql-machine-learning/sqlflow
|
python/runtime/dbapi/pyalisa/config.py
|
1
|
4854
|
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import base64
import json
import os
from collections import OrderedDict
from six.moves.urllib.parse import parse_qs, urlparse
class Config(object):
"""Alisa config object, this can be parsed from an alisa dsn
Args:
url(string): a connection url like :
"alisa://user:pwd@host/path?env=AAB&with=XSE".
There are three required params in the url: current_project,
env and with. The env and with params are maps, which is
dumpped to json and then encoded in base64 format, that is:
env=base64(json.dumps({"a":1, "b":2}))
"""
def __init__(self, url=None):
if url:
self._parse_url(url)
def _parse_url(self, url):
urlpts = urlparse(url)
kvs = parse_qs(urlpts.query)
required = ["env", "with", "curr_project"]
for k in required:
if k not in kvs:
raise ValueError("Given dsn does not contain: %s" % k)
        # extract the param if it only has one element
for k, v in kvs.items():
if len(v) == 1:
kvs[k] = v[0]
self.pop_access_id = urlpts.username
self.pop_access_secret = urlpts.password
self.pop_url = urlpts.hostname + urlpts.path
self.pop_scheme = urlpts.scheme
self.env = Config._decode_json_base64(kvs["env"])
self.withs = Config._decode_json_base64(kvs["with"])
self.scheme = kvs["scheme"] or "http"
self.verbose = kvs["verbose"] == "true"
self.curr_project = kvs["curr_project"]
@staticmethod
def _encode_json_base64(env):
        # Sort the env params to ensure a consistent encoding
        jstr = json.dumps(OrderedDict(sorted(env.items())))
b64 = base64.urlsafe_b64encode(jstr.encode("utf8")).decode("utf8")
return b64.rstrip("=")
@staticmethod
def _decode_json_base64(b64env):
padded = b64env + "=" * (len(b64env) % 4)
jstr = base64.urlsafe_b64decode(padded.encode('utf8')).decode("utf8")
return json.loads(jstr)
def to_url(self):
"""Serialize a config to connection url
Returns:
            (string) a connection url built from this config
"""
parts = (
self.pop_access_id,
self.pop_access_secret,
self.pop_url,
self.scheme,
"true" if self.verbose else "false",
self.curr_project,
Config._encode_json_base64(self.env),
Config._encode_json_base64(self.withs),
)
return ("alisa://%s:%s@%s?scheme=%s&verbose"
"=%s&curr_project=%s&env=%s&with=%s") % parts
@staticmethod
def from_env():
"""Build a Client from environment variable
Returns:
a Client instance
"""
if not os.getenv("POP_SECRET"):
return None
conf = Config()
conf.pop_url = os.getenv("POP_URL")
conf.pop_access_id = os.getenv("POP_ID")
conf.pop_access_secret = os.getenv("POP_SECRET")
conf.pop_scheme = "http"
conf.verbose = os.getenv("VERBOSE") == "true"
conf.env = {
"SKYNET_ONDUTY": os.getenv("SKYNET_ONDUTY"),
"SKYNET_ACCESSID": os.getenv("SKYNET_ACCESSID"),
"SKYNET_ACCESSKEY": os.getenv("SKYNET_ACCESSKEY"),
"SKYNET_ENDPOINT": os.getenv("SKYNET_ENDPOINT"),
"SKYNET_SYSTEMID": os.getenv("SKYNET_SYSTEMID"),
"SKYNET_PACKAGEID": os.getenv("SKYNET_PACKAGEID"),
"SKYNET_SYSTEM_ENV": os.getenv("SKYNET_SYSTEM_ENV"),
"SKYNET_BIZDATE": os.getenv("SKYNET_BIZDATE"),
"SKYNET_TENANT_ID": os.getenv("SKYNET_TENANT_ID"),
"ALISA_TASK_EXEC_TARGET": os.getenv("ALISA_TASK_EXEC_TARGET"),
}
conf.withs = {
"CustomerId": os.getenv("CustomerId"),
"PluginName": os.getenv("PluginName"),
"Exec": os.getenv("Exec"),
"PluginName4PyODPS": os.getenv("PluginName4PyODPS"),
"Exec4PyODPS": os.getenv("Exec4PyODPS"),
}
conf.curr_project = conf.env["SKYNET_PACKAGEID"]
if len(conf.env["SKYNET_SYSTEMID"]) > 0:
conf.curr_project += "_" + conf.env["SKYNET_SYSTEMID"]
return conf
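# A minimal round-trip sketch (illustrative only; the values below are hypothetical
# and not part of SQLFlow): encode the env/with maps, build a DSN, parse it back
# with Config, and serialize it again with to_url().
def _example_roundtrip():  # pragma: no cover
    env_b64 = Config._encode_json_base64({"SKYNET_ONDUTY": "yes"})
    with_b64 = Config._encode_json_base64({"Exec": "run.py"})
    dsn = ("alisa://id:secret@host/path?scheme=http&verbose=false"
           "&curr_project=proj&env=%s&with=%s") % (env_b64, with_b64)
    conf = Config(dsn)
    assert conf.curr_project == "proj"
    assert conf.env["SKYNET_ONDUTY"] == "yes"
    return conf.to_url()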
|
apache-2.0
|
mbareta/edx-platform-ft
|
cms/djangoapps/contentstore/features/upload.py
|
60
|
7456
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from lettuce.django import django_url
from django.conf import settings
import requests
import string
import random
import os
from django.contrib.auth.models import User
from student.models import CourseEnrollment
from nose.tools import assert_equal, assert_not_equal
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
ASSET_NAMES_CSS = 'td.name-col > span.title > a.filename'
@step(u'I go to the files and uploads page$')
def go_to_uploads(_step):
menu_css = 'li.nav-course-courseware'
uploads_css = 'li.nav-course-courseware-uploads a'
world.css_click(menu_css)
world.css_click(uploads_css)
@step(u'I upload the( test)? file "([^"]*)"$')
def upload_file(_step, is_test_file, file_name, button_text=None):
if button_text:
world.click_link(button_text)
else:
world.click_link('Upload New File')
if not is_test_file:
_write_test_file(file_name, "test file")
# uploading the file itself
path = os.path.join(TEST_ROOT, 'uploads/', file_name)
world.browser.execute_script("$('input.file-input').css('display', 'block')")
world.browser.attach_file('file', os.path.abspath(path))
close_css = 'a.close-button'
world.css_click(close_css)
@step(u'I upload the file "([^"]*)" by clicking "([^"]*)"')
def upload_file_on_button_press(_step, file_name, button_text=None):
upload_file(_step, '', file_name, button_text)
@step(u'I upload the files "([^"]*)"$')
def upload_files(_step, files_string):
# files_string should be comma separated with no spaces.
files = files_string.split(",")
upload_css = 'a.upload-button'
world.css_click(upload_css)
# uploading the files
for filename in files:
_write_test_file(filename, "test file")
path = os.path.join(TEST_ROOT, 'uploads/', filename)
world.browser.execute_script("$('input.file-input').css('display', 'block')")
world.browser.attach_file('file', os.path.abspath(path))
close_css = 'a.close-button'
world.css_click(close_css)
@step(u'I should not see the file "([^"]*)" was uploaded$')
def check_not_there(_step, file_name):
# Either there are no files, or there are files but
# not the one I expect not to exist.
# Since our only test for deletion right now deletes
# the only file that was uploaded, our success criteria
# will be that there are no files.
# In the future we can refactor if necessary.
assert world.is_css_not_present(ASSET_NAMES_CSS)
@step(u'I should see the file "([^"]*)" was uploaded$')
def check_upload(_step, file_name):
index = get_index(file_name)
assert_not_equal(index, -1)
@step(u'The url for the file "([^"]*)" is valid$')
def check_url(_step, file_name):
r = get_file(file_name)
assert_equal(r.status_code, 200)
@step(u'I delete the file "([^"]*)"$')
def delete_file(_step, file_name):
index = get_index(file_name)
assert index != -1
delete_css = "a.remove-asset-button"
world.css_click(delete_css, index=index)
world.confirm_studio_prompt()
@step(u'I should see only one "([^"]*)"$')
def no_duplicate(_step, file_name):
all_names = world.css_find(ASSET_NAMES_CSS)
only_one = False
for i in range(len(all_names)):
if file_name == world.css_html(ASSET_NAMES_CSS, index=i):
only_one = not only_one
assert only_one
@step(u'I can download the correct "([^"]*)" file$')
def check_download(_step, file_name):
path = os.path.join(TEST_ROOT, 'uploads/', file_name)
with open(os.path.abspath(path), 'r') as cur_file:
cur_text = cur_file.read()
r = get_file(file_name)
downloaded_text = r.text
assert cur_text == downloaded_text
# resetting the file back to its original state
_write_test_file(file_name, "This is an arbitrary file for testing uploads")
def _write_test_file(file_name, text):
path = os.path.join(TEST_ROOT, 'uploads/', file_name)
# resetting the file back to its original state
with open(os.path.abspath(path), 'w') as cur_file:
cur_file.write(text)
@step(u'I modify "([^"]*)"$')
def modify_upload(_step, file_name):
new_text = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(10))
_write_test_file(file_name, new_text)
@step(u'I upload an asset$')
def upload_an_asset(step):
step.given('I upload the file "asset.html"')
@step(u'I (lock|unlock) the asset$')
def lock_unlock_file(_step, _lock_state):
index = get_index('asset.html')
assert index != -1, 'Expected to find an asset but could not.'
# Warning: this is a misnomer, it really only toggles the
# lock state. TODO: fix it.
lock_css = "input.lock-checkbox"
world.css_find(lock_css)[index].click()
@step(u'the user "([^"]*)" is enrolled in the course$')
def user_foo_is_enrolled_in_the_course(step, name):
world.create_user(name, 'test')
user = User.objects.get(username=name)
course_id = world.scenario_dict['COURSE'].id
CourseEnrollment.enroll(user, course_id)
@step(u'Then the asset is (locked|unlocked)$')
def verify_lock_unlock_file(_step, lock_state):
index = get_index('asset.html')
assert index != -1, 'Expected to find an asset but could not.'
lock_css = "input.lock-checkbox"
checked = world.css_find(lock_css)[index]._element.get_attribute('checked')
assert_equal(lock_state == "locked", bool(checked))
@step(u'I am at the files and upload page of a Studio course')
def at_upload_page(step):
step.given('I have opened a new course in studio')
step.given('I go to the files and uploads page')
@step(u'I have created a course with a (locked|unlocked) asset$')
def open_course_with_locked(step, lock_state):
step.given('I am at the files and upload page of a Studio course')
step.given('I upload the file "asset.html"')
if lock_state == "locked":
step.given('I lock the asset')
step.given('I reload the page')
@step(u'Then the asset is (viewable|protected)$')
def view_asset(_step, status):
asset_loc = world.scenario_dict['COURSE'].id.make_asset_key(asset_type='asset', path='asset.html')
svr_loc = django_url()
asset_url = unicode(asset_loc)
divider = '/'
if asset_url[0] == '/':
divider = ''
url = '{}{}{}'.format(svr_loc, divider, asset_url)
if status == 'viewable':
expected_text = 'test file'
else:
expected_text = 'Unauthorized'
# Note that world.visit would trigger a 403 error instead of displaying "Unauthorized"
# Instead, we can drop back into the selenium driver get command.
world.browser.driver.get(url)
assert_equal(world.css_text('body'), expected_text)
@step('I see a confirmation that the file was deleted$')
def i_see_a_delete_confirmation(_step):
alert_css = '#notification-confirmation'
assert world.is_css_present(alert_css)
def get_index(file_name):
all_names = world.css_find(ASSET_NAMES_CSS)
for i in range(len(all_names)):
if file_name == world.css_html(ASSET_NAMES_CSS, index=i):
return i
return -1
def get_file(file_name):
index = get_index(file_name)
assert index != -1
url_css = 'a.filename'
def get_url():
return world.css_find(url_css)[index]._element.get_attribute('href')
url = world.retry_on_exception(get_url)
return requests.get(url)
|
agpl-3.0
|
vrenaville/ngo-addons-backport
|
addons/sale_order_dates/__openerp__.py
|
69
|
1812
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Dates on Sales Order',
'version': '1.1',
'category': 'Sales Management',
'description': """
Add additional date information to the sales order.
===================================================
You can add the following additional dates to a sales order:
------------------------------------------------------------
* Requested Date (will be used as the expected date on pickings)
* Commitment Date
* Effective Date
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/sale_order_dates.jpeg'],
'depends': ['sale_stock'],
'data': ['sale_order_dates_view.xml'],
'demo': [],
'test': ['test/requested_date.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jmanuel1/GitSavvy
|
core/commands/custom.py
|
3
|
1592
|
import sublime
import threading
from sublime_plugin import WindowCommand
from ..git_command import GitCommand
from ...common import util
ALL_REMOTES = "All remotes."
class CustomCommandThread(threading.Thread):
def __init__(self, func, *args, **kwargs):
super(CustomCommandThread, self).__init__(**kwargs)
self.cmd_args = args
self.cmd_func = func
self.daemon = True
def run(self):
return self.cmd_func(*self.cmd_args)
class GsCustomCommand(WindowCommand, GitCommand):
"""
Run the specified custom command asynchronously.
"""
def run(self, **kwargs):
sublime.set_timeout_async(lambda: self.run_async(**kwargs), 0)
def run_async(self,
output_to_panel=False,
args=None,
start_msg="Starting custom command...",
complete_msg="Completed custom command.",
run_in_thread=False):
        if not args:
            sublime.error_message("Custom command must provide args.")
            return
for idx, arg in enumerate(args):
if arg == "{REPO_PATH}":
args[idx] = self.repo_path
elif arg == "{FILE_PATH}":
args[idx] = self.file_path
sublime.status_message(start_msg)
if run_in_thread:
stdout = ''
cmd_thread = CustomCommandThread(self.git, *args)
cmd_thread.start()
else:
stdout = self.git(*args)
sublime.status_message(complete_msg)
if output_to_panel:
util.log.panel(stdout)
|
mit
|
orbitfp7/nova
|
nova/db/sqlalchemy/migrate_repo/versions/275_add_keypair_type.py
|
8
|
1932
|
# Copyright (c) 2015 Cloudbase Solutions SRL
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Column, Table
from sqlalchemy import Enum
from nova.objects import keypair
def upgrade(migrate_engine):
"""Function adds key_pairs type field."""
meta = MetaData(bind=migrate_engine)
key_pairs = Table('key_pairs', meta, autoload=True)
shadow_key_pairs = Table('shadow_key_pairs', meta, autoload=True)
enum = Enum('ssh', 'x509', metadata=meta, name='keypair_types')
enum.create()
keypair_type = Column('type', enum, nullable=False,
server_default=keypair.KEYPAIR_TYPE_SSH)
if hasattr(key_pairs.c, 'type'):
key_pairs.c.type.drop()
if hasattr(shadow_key_pairs.c, 'type'):
shadow_key_pairs.c.type.drop()
key_pairs.create_column(keypair_type)
shadow_key_pairs.create_column(keypair_type.copy())
def downgrade(migrate_engine):
"""Function removes key_pairs type field."""
meta = MetaData(bind=migrate_engine)
key_pairs = Table('key_pairs', meta, autoload=True)
shadow_key_pairs = Table('shadow_key_pairs', meta, autoload=True)
enum = Enum(metadata=meta, name='keypair_types')
if hasattr(key_pairs.c, 'type'):
key_pairs.c.type.drop()
if hasattr(shadow_key_pairs.c, 'type'):
shadow_key_pairs.c.type.drop()
enum.drop()
|
apache-2.0
|
spthaolt/curtsies
|
curtsies/escseqparse.py
|
1
|
3273
|
r"""
Parses ascii escape sequences into marked up strings
>>> r = peel_off_esc_code('\x1bAmore')
>>> r == ('', {'csi': '\x1b', 'command': 'A', 'seq': '\x1bA'}, 'more')
True
>>> r = peel_off_esc_code('\x1b[2Astuff')
>>> r == ('', {'csi': '\x1b[', 'seq': '\x1b[2A', 'intermed': '', 'private': '', 'command': 'A', 'numbers': [2]}, 'stuff')
True
"""
from .termformatconstants import (FG_NUMBER_TO_COLOR, BG_NUMBER_TO_COLOR,
NUMBER_TO_STYLE, RESET_ALL, RESET_FG,
RESET_BG, STYLES)
import re
def parse(s):
r"""
>>> parse(">>> []")
['>>> []']
>>> #parse("\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m")
"""
stuff = []
rest = s
while True:
front, token, rest = peel_off_esc_code(rest)
if front:
stuff.append(front)
if token:
try:
stuff.append(token_type(token))
except ValueError:
raise ValueError("Can't parse escape sequence: %r %r %r %r" % (s, repr(front), token, repr(rest)))
if not rest:
break
return stuff
def peel_off_esc_code(s):
"""Returns processed text, the next token, and unprocessed text"""
p = r"""(?P<front>.*?)
(?P<seq>
(?P<csi>
                    (?:\x1b\[)
|
["""+'\x9b' + r"""])
(?P<private>)
(?P<numbers>
(?:\d+;)*
(?:\d+)?)
(?P<intermed>""" + '[\x20-\x2f]*)' + r"""
(?P<command>""" + '[\x40-\x7e]))' + r"""
(?P<rest>.*)"""
m1 = re.match(p, s, re.VERBOSE) #multibyte esc seq
    m2 = re.match('(?P<front>.*?)(?P<seq>(?P<csi>\x1b)(?P<command>[\x40-\x5f]))(?P<rest>.*)', s) # 2 byte escape sequence
if m1 and m2:
m = m1 if len(m1.groupdict()['front']) <= len(m2.groupdict()['front']) else m2
# choose the match which has less processed text in order to get the
# first escape sequence
elif m1: m = m1
elif m2: m = m2
else: m = None
if m:
d = m.groupdict()
del d['front']
del d['rest']
        if 'numbers' in d and all(d['numbers'].split(';')):
            d['numbers'] = [int(x) for x in d['numbers'].split(';')]
return m.groupdict()['front'], d, m.groupdict()['rest']
else:
return s, None, ''
def token_type(info):
"""
"""
if info['command'] == 'm':
value, = info['numbers']
if value in FG_NUMBER_TO_COLOR: return {'fg':FG_NUMBER_TO_COLOR[value]}
if value in BG_NUMBER_TO_COLOR: return {'bg':BG_NUMBER_TO_COLOR[value]}
if value in NUMBER_TO_STYLE: return {NUMBER_TO_STYLE[value]:True}
if value == RESET_ALL: return dict(dict((k, None) for k in STYLES), **{'fg':None, 'bg':None})
if value == RESET_FG: return {'fg':None}
if value == RESET_BG: return {'bg':None}
raise ValueError("Can't parse escape seq %r" % info)
if __name__ == '__main__':
import doctest; doctest.testmod()
    #print(peel_off_esc_code('\x1b[2Astuff'))
    #print(peel_off_esc_code('\x1bAmore'))
    print((repr(parse('\x1b[31mstuff is the best\x1b[32myay'))))
|
mit
|
ngoix/OCRF
|
examples/cluster/plot_dict_face_patches.py
|
337
|
2747
|
"""
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
LiaoPan/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
|
Chapter5_LossFunctions/DarkWorldsMetric.py
|
92
|
20375
|
""" DarkWorldsMetricMountianOsteric.py
Custom evaluation metric for the 'Observing Dark Worlds' competition.
[Description of metric, or reference to documentation.]
Update: Made for the training set only so users can check their results from the training c
@Author: David Harvey
Created: 22 August 2012
"""
import numpy as np
import math as mt
import itertools as it
import csv as c
import getopt as gt
import sys as sys
import argparse as ap
import string as st
import random as rd
def calc_delta_r(x_predicted,y_predicted,x_true,y_true):
""" Compute the scalar distance between predicted halo centers
and the true halo centers. Predictions are matched to the closest
halo center.
Notes: It takes in the predicted and true positions, and then loops over each possible configuration and finds the most optimal one.
Arguments:
x_predicted, y_predicted: vector for predicted x- and y-positions (1 to 3 elements)
x_true, y_true: vector for known x- and y-positions (1 to 3 elements)
Returns:
radial_distance: vector containing the scalar distances between the predicted halo centres and the true halo centres (1 to 3 elements)
true_halo_idexes: vector containing indexes of the input true halos which matches the predicted halo indexes (1 to 3 elements)
measured_halo_indexes: vector containing indexes of the predicted halo position with the reference to the true halo position.
e.g if true_halo_indexes=[0,1] and measured_halo_indexes=[1,0] then the first x,y coordinates of the true halo position matches the second input of the predicted x,y coordinates.
"""
num_halos=len(x_true) #Only works for number of halos > 1
num_configurations=mt.factorial(num_halos) #The number of possible different comb
configurations=np.zeros([num_halos,num_configurations],int) #The array of combinations
#I will pass back
distances = np.zeros([num_configurations],float) #The array of the distances
#for all possible combinations
radial_distance=[] #The vector of distances
#I will pass back
#Pick a combination of true and predicted
    a=['01','012'] #Input for the permutations: 01 for two halos, 012 for three
count=0 #For the index of the distances array
true_halo_indexes=[] #The tuples which will show the order of halos picked
predicted_halo_indexes=[]
distances_perm=np.zeros([num_configurations,num_halos],float) #The distance between each
#true and predicted
#halo for every comb
true_halo_indexes_perm=[] #log of all the permutations of true halos used
predicted_halo_indexes_perm=[] #log of all the predicted permutations
for perm in it.permutations(a[num_halos-2],num_halos):
which_true_halos=[]
which_predicted_halos=[]
for j in xrange(num_halos): #loop through all the true halos with the
distances_perm[count,j]=np.sqrt((x_true[j]-x_predicted[int(perm[j])])**2\
+(y_true[j]-y_predicted[int(perm[j])])**2)
#This array logs the distance between true and
#predicted halo for ALL configurations
which_true_halos.append(j) #log the order in which I try each true halo
            which_predicted_halos.append(int(perm[j])) #log the order in which I try
#each predicted halo
true_halo_indexes_perm.append(which_true_halos) #this is a tuple of tuples of
                                                        #all of the different config
#true halo indexes
predicted_halo_indexes_perm.append(which_predicted_halos)
distances[count]=sum(distances_perm[count,0::]) #Find what the total distances
#are for each configuration
count=count+1
config = np.where(distances == min(distances))[0][0] #The configuration used is the one
#which has the smallest distance
radial_distance.append(distances_perm[config,0::]) #Find the tuple of distances that
#correspond to this smallest distance
true_halo_indexes=true_halo_indexes_perm[config] #Find the tuple of the index which refers
#to the smallest distance
predicted_halo_indexes=predicted_halo_indexes_perm[config]
return radial_distance,true_halo_indexes,predicted_halo_indexes
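# Illustrative sketch (hypothetical coordinates, not competition data): with the true
# and predicted centres listed in opposite order, calc_delta_r is expected to pair
# each prediction with its nearest true halo and report the matched indexes.
def _example_calc_delta_r(): # pragma: no cover
    x_pred = np.array([10.0, 100.0])
    y_pred = np.array([10.0, 100.0])
    x_true = np.array([100.0, 10.0])
    y_true = np.array([100.0, 10.0])
    dist, true_idx, pred_idx = calc_delta_r(x_pred, y_pred, x_true, y_true)
    # dist[0] is array([0., 0.]), true_idx is [0, 1], pred_idx is [1, 0]
    return dist, true_idx, pred_idx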
def calc_theta(x_predicted, y_predicted, x_true, y_true, x_ref, y_ref):
""" Calculate the angle the predicted position and the true position, where the zero degree corresponds to the line joing the true halo position and the reference point given.
Arguments:
x_predicted, y_predicted: vector for predicted x- and y-positions (1 to 3 elements)
x_true, y_true: vector for known x- and y-positions (1 to 3 elements)
Note that the input of these are matched up so that the first elements of each
vector are associated with one another
x_ref, y_ref: scalars of the x,y coordinate of reference point
Returns:
Theta: A vector containing the angles of the predicted halo w.r.t the true halo
with the vector joining the reference point and the halo as the zero line.
"""
num_halos=len(x_predicted)
theta=np.zeros([num_halos+1],float) #Set up the array which will pass back the values
phi = np.zeros([num_halos],float)
psi = np.arctan( (y_true-y_ref)/(x_true-x_ref) )
# Angle at which the halo is at
#with respect to the reference point
phi[x_true != x_ref] = np.arctan((y_predicted[x_true != x_predicted]-\
y_true[x_true != x_predicted])\
/(x_predicted[x_true != x_predicted]-\
x_true[x_true != x_predicted])) # Angle of the estimate
#wrt true halo centre
    #Before finding the angle with the zero line as the line joining the halo and the reference
#point I need to convert the angle produced by Python to an angle between 0 and 2pi
phi =convert_to_360(phi, x_predicted-x_true,\
y_predicted-y_true)
psi = convert_to_360(psi, x_true-x_ref,\
y_true-y_ref)
    theta = phi-psi #The angle with the baseline as the line joining the ref and the halo
theta[theta< 0.0]=theta[theta< 0.0]+2.0*mt.pi #If the angle of the true pos wrt the ref is
#greater than the angle of predicted pos
#and the true pos then add 2pi
return theta
def convert_to_360(angle, x_in, y_in):
""" Convert the given angle to the true angle in the range 0:2pi
Arguments:
angle:
x_in, y_in: the x and y coordinates used to determine the quartile
the coordinate lies in so to add of pi or 2pi
Returns:
theta: the angle in the range 0:2pi
"""
n = len(x_in)
for i in xrange(n):
if x_in[i] < 0 and y_in[i] > 0:
angle[i] = angle[i]+mt.pi
elif x_in[i] < 0 and y_in[i] < 0:
angle[i] = angle[i]+mt.pi
elif x_in[i] > 0 and y_in[i] < 0:
angle[i] = angle[i]+2.0*mt.pi
elif x_in[i] == 0 and y_in[i] == 0:
angle[i] = 0
elif x_in[i] == 0 and y_in[i] > 0:
angle[i] = mt.pi/2.
elif x_in[i] < 0 and y_in[i] == 0:
angle[i] = mt.pi
elif x_in[i] == 0 and y_in[i] < 0:
angle[i] = 3.*mt.pi/2.
return angle
def get_ref(x_halo,y_halo,weight):
""" Gets the reference point of the system of halos by weighted averaging the x and y
coordinates.
Arguments:
x_halo, y_halo: Vector num_halos referring to the coordinates of the halos
weight: the weight which will be assigned to the position of the halo
num_halos: number of halos in the system
Returns:
x_ref, y_ref: The coordinates of the reference point for the metric
"""
#Find the weighted average of the x and y coordinates
x_ref = np.sum([x_halo*weight])/np.sum([weight])
y_ref = np.sum([y_halo*weight])/np.sum([weight])
return x_ref,y_ref
def main_score( nhalo_all, x_true_all, y_true_all, x_ref_all, y_ref_all, sky_prediction):
"""abstracts the score from the old command-line interface.
sky_prediction is a dx2 array of predicted x,y positions
-camdp"""
r=np.array([],dtype=float) # The array which I will log all the calculated radial distances
angle=np.array([],dtype=float) #The array which I will log all the calculated angles
#Load in the sky_ids from the true
num_halos_total=0 #Keep track of how many halos are input into the metric
for selectskyinsolutions, sky in enumerate(sky_prediction): #Loop through each line in result.csv and analyse each one
nhalo=int(nhalo_all[selectskyinsolutions])#How many halos in the
#selected sky?
x_true=x_true_all[selectskyinsolutions][0:nhalo]
y_true=y_true_all[selectskyinsolutions][0:nhalo]
x_predicted=np.array([],dtype=float)
y_predicted=np.array([],dtype=float)
for i in xrange(nhalo):
x_predicted=np.append(x_predicted,float(sky[0])) #get the predicted values
y_predicted=np.append(y_predicted,float(sky[1]))
#The solution file for the test data provides masses
        #to calculate the centre of mass whereas the Training_halo.csv
        #directly provides x_ref y_ref. So in the case of test data
#we need to calculate the ref point from the masses using
#Get_ref()
x_ref=x_ref_all[selectskyinsolutions]
y_ref=y_ref_all[selectskyinsolutions]
num_halos_total=num_halos_total+nhalo
#Single halo case, this needs to be separately calculated since
#x_ref = x_true
if nhalo == 1:
#What is the radial distance between the true and predicted position
r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
+ (y_predicted-y_true)**2))
#What is the angle between the predicted position and true halo position
if (x_predicted-x_true) != 0:
psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
else: psi=0.
theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
angle=np.append(angle,theta)
else:
#r_index_index, contains the radial distances of the predicted to
#true positions. These are found by matching up the true halos to
#the predicted halos such that the average of all the radial distances
            #is optimal. It also contains indexes of the halos used, which
            #show which halo has been matched to which.
r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
y_true)
r=np.append(r,r_index_index[0][0])
halo_index= r_index_index[1] #The true halos indexes matched with the
predicted_index=r_index_index[2] #predicted halo index
angle=np.append(angle,calc_theta\
(x_predicted[predicted_index],\
y_predicted[predicted_index],\
x_true[halo_index],\
y_true[halo_index],x_ref,\
y_ref)) # Find the angles of the predicted
#position wrt to the halo and
# add to the vector angle
# Find what the average distance the estimate is from the halo position
av_r=sum(r)/len(r)
#In order to quantify the orientation invariance we will express each angle
# as a vector and find the average vector
#R_bar^2 = ( (1/N) * Sum_i cos(theta_i) )^2 + ( (1/N) * Sum_i sin(theta_i) )^2
N = float(num_halos_total)
angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
( 1.0/N * sum(np.sin(angle)) )**2)
W1=1./1000. #Weight av_r so that a score < 1 is good and > 1 is not so good.
W2=1.
metric = W1*av_r + W2*angle_vec #Weighted metric, weights TBD
print 'Your average distance in pixels from the true halo is', av_r
print 'Your average angular vector is', angle_vec
print 'Your score for the training data is', metric
return metric
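# A hedged sketch of the call shape (made-up, single-sky input): one true halo
# at (1000, 1000) and a prediction at (900, 1100) gives a radial error of
# sqrt(100**2 + 100**2) ~ 141.4 px, which enters the metric with weight W1:
#   >>> main_score([1], [[1000.]], [[1000.]], [1000.], [1000.], [[900., 1100.]])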
def main(user_fname, fname):
""" Script to compute the evaluation metric for the Observing Dark Worlds competition. You can run it on your training data to understand how well you have done with the training data.
"""
r=np.array([],dtype=float) # The array which I will log all the calculated radial distances
angle=np.array([],dtype=float) #The array which I will log all the calculated angles
#Load in the sky_ids from the true
true_sky_id=[]
sky_loader = c.reader(open(fname, 'rb')) #Load in the sky_ids from the solution file
for row in sky_loader:
true_sky_id.append(row[0])
#Load in the true values from the solution file
nhalo_all=np.loadtxt(fname,usecols=(1,),delimiter=',',skiprows=1)
x_true_all=np.loadtxt(fname,usecols=(4,6,8),delimiter=',',skiprows=1)
y_true_all=np.loadtxt(fname,usecols=(5,7,9),delimiter=',',skiprows=1)
x_ref_all=np.loadtxt(fname,usecols=(2,),delimiter=',',skiprows=1)
y_ref_all=np.loadtxt(fname,usecols=(3,),delimiter=',',skiprows=1)
for row in sky_loader:
true_sky_id.append(row[1])
num_halos_total=0 #Keep track of how many halos are input into the metric
sky_prediction = c.reader(open(user_fname, 'rb')) #Open the result.csv
try: #See if the input file from user has a header on it
#with open('JoyceTest/trivialUnitTest_Pred.txt', 'r') as f:
with open(user_fname, 'r') as f:
header = float((f.readline()).split(',')[1]) #try to cast what would be
#the first numeric field to a float;
#if it succeeds, the file
#does not have a header
print 'THE INPUT FILE DOES NOT APPEAR TO HAVE A HEADER'
except :
print 'THE INPUT FILE APPEARS TO HAVE A HEADER, SKIPPING THE FIRST LINE'
skip_header = sky_prediction.next()
for sky in sky_prediction: #Loop through each line in result.csv and analyse each one
sky_id = str(sky[0]) #Get the sky_id of the input
does_it_exist=true_sky_id.count(sky_id) #Is the input sky_id
#from user a real one?
if does_it_exist > 0: #If it does then find the matching solutions to the sky_id
selectskyinsolutions=true_sky_id.index(sky_id)-1
else: #Otherwise exit
print 'Sky_id does not exist, formatting problem: ',sky_id
sys.exit(2)
nhalo=int(nhalo_all[selectskyinsolutions])#How many halos in the
#selected sky?
x_true=x_true_all[selectskyinsolutions][0:nhalo]
y_true=y_true_all[selectskyinsolutions][0:nhalo]
x_predicted=np.array([],dtype=float)
y_predicted=np.array([],dtype=float)
for i in xrange(nhalo):
x_predicted=np.append(x_predicted,float(sky[2*i+1])) #get the predicted values
y_predicted=np.append(y_predicted,float(sky[2*i+2]))
#The solution file for the test data provides masses
#to calculate the centre of mass, whereas Training_halo.csv
#directly provides x_ref and y_ref. So in the case of test data
#we need to calculate the ref point from the masses using
#get_ref()
x_ref=x_ref_all[selectskyinsolutions]
y_ref=y_ref_all[selectskyinsolutions]
num_halos_total=num_halos_total+nhalo
#Single halo case, this needs to be separately calculated since
#x_ref = x_true
if nhalo == 1:
#What is the radial distance between the true and predicted position
r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
+ (y_predicted-y_true)**2))
#What is the angle between the predicted position and true halo position
if (x_predicted-x_true) != 0:
psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
else: psi=0.
theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
angle=np.append(angle,theta)
else:
#r_index_index contains the radial distances of the predicted to
#true positions. These are found by matching up the true halos to
#the predicted halos such that the average of all the radial distances
#is optimal. It also contains the indexes of the halos used, which
#show which halo has been matched to which.
r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
y_true)
r=np.append(r,r_index_index[0][0])
halo_index= r_index_index[1] #The true halos indexes matched with the
predicted_index=r_index_index[2] #predicted halo index
angle=np.append(angle,calc_theta\
(x_predicted[predicted_index],\
y_predicted[predicted_index],\
x_true[halo_index],\
y_true[halo_index],x_ref,\
y_ref)) # Find the angles of the predicted
#position wrt to the halo and
# add to the vector angle
# Find what the average distance the estimate is from the halo position
av_r=sum(r)/len(r)
#In order to quantify the orientation invariance we will express each angle
# as a vector and find the average vector
#R_bar^2 = ( (1/N) * Sum_i cos(theta_i) )^2 + ( (1/N) * Sum_i sin(theta_i) )^2
N = float(num_halos_total)
angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
( 1.0/N * sum(np.sin(angle)) )**2)
W1=1./1000. #Weight av_r so that a score < 1 is good and > 1 is not so good.
W2=1.
metric = W1*av_r + W2*angle_vec #Weighted metric, weights TBD
print 'Your average distance in pixels from the true halo is', av_r
print 'Your average angular vector is', angle_vec
print 'Your score for the training data is', metric
if __name__ == "__main__":
#For help just typed 'python DarkWorldsMetric.py -h'
parser = ap.ArgumentParser(description='Work out the Metric for your input file')
parser.add_argument('inputfile',type=str,nargs=1,help='Input file of halo positions. Needs to be in the format SkyId,halo_x1,halo_y1,halo_x2,halo_y2,halo_x3,halo_y3 ')
parser.add_argument('reffile',type=str,nargs=1,help='This should point to Training_halos.csv')
args = parser.parse_args()
user_fname=args.inputfile[0]
filename = (args.reffile[0]).count('Training_halos.csv')
if filename == 0:
fname=args.reffile[0]+str('Training_halos.csv')
else:
fname=args.reffile[0]
main(user_fname, fname)
|
mit
|
johnuseast/hyde
|
templates/simple/settings.py
|
40
|
3451
|
import os
here = lambda *x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
#Directories
LAYOUT_DIR = here('layout')
CONTENT_DIR = here('content')
MEDIA_DIR = here('media')
DEPLOY_DIR = here('deploy')
TMP_DIR = here('deploy_tmp')
BACKUPS_DIR = here('backups')
BACKUP = False
SITE_ROOT = "/"
SITE_WWW_URL = "http://www.yoursite.com"
SITE_NAME = "Hyde"
#Url Configuration
GENERATE_ABSOLUTE_FS_URLS = False
# Clean urls causes Hyde to generate urls without extensions. Examples:
# http://example.com/section/page.html becomes
# http://example.com/section/page/, and the listing for that section becomes
# http://example.com/section/
# The built-in CherryPy webserver is capable of serving pages with clean urls
# without any additional configuration, but Apache will need to use Mod_Rewrite
# to map the clean urls to the actual html files. The HtaccessGenerator site
# post processor is capable of automatically generating the necessary
# RewriteRules for use with Apache.
GENERATE_CLEAN_URLS = False
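# A hand-written Apache fallback (hypothetical sketch, not the output of the
# HtaccessGenerator mentioned above) could map a clean url such as
# /section/page/ back onto section/page.html roughly like this:
#   RewriteEngine On
#   RewriteCond %{REQUEST_FILENAME} !-f
#   RewriteRule ^(.+)/$ $1.html [L]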
# A list of filenames (without extensions) that will be considered listing
# pages for their enclosing folders.
# LISTING_PAGE_NAMES = ['index']
LISTING_PAGE_NAMES = []
# Determines whether or not to append a trailing slash to generated urls when
# clean urls are enabled.
APPEND_SLASH = False
# {folder : extension : (processors)}
# The processors are run in the given order and are chained.
# Only a lone * is supported as an indicator for folders. Otherwise the path
# should be specified. No wildcard support yet.
# Paths start under the media folder. For example, if you have media/css under
# your site root, you should specify just css. If you have media/css/ie you
# should specify css/ie for the folder name. css/* is not supported (yet).
# Extensions do not support wildcards.
MEDIA_PROCESSORS = {
'*':{
'.css':('hydeengine.media_processors.TemplateProcessor',
'hydeengine.media_processors.CSSmin',),
'.less':('hydeengine.media_processors.TemplateProcessor',
'hydeengine.media_processors.LessCSS',
'hydeengine.media_processors.CSSmin',),
'.js':(
'hydeengine.media_processors.TemplateProcessor',
'hydeengine.media_processors.JSmin',),
'.coffee':(
'hydeengine.media_processors.TemplateProcessor',
'hydeengine.media_processors.CoffeeScript',
'hydeengine.media_processors.JSmin',)
}
}
CONTENT_PROCESSORS = {}
SITE_POST_PROCESSORS = {
# 'media/js': {
# 'hydeengine.site_post_processors.FolderFlattener' : {
# 'remove_processed_folders': True,
# 'pattern':"*.js"
# }
# }
}
CONTEXT = {
'GENERATE_CLEAN_URLS': GENERATE_CLEAN_URLS
}
FILTER = {
'include': (".htaccess",),
'exclude': (".*","*~")
}
#Processor Configuration
# path for YUICompressor, or None if you don't
# want to compress JS/CSS. Project homepage:
# http://developer.yahoo.com/yui/compressor/
#YUI_COMPRESSOR = "./lib/yuicompressor-2.4.1.jar"
YUI_COMPRESSOR = None
# path for HSS, which is a preprocessor for CSS-like files (*.hss)
# project page at http://ncannasse.fr/projects/hss
#HSS_PATH = "./lib/hss-1.0-osx"
HSS_PATH = None # if you don't want to use HSS
#Django settings
TEMPLATE_DIRS = (LAYOUT_DIR, CONTENT_DIR, TMP_DIR, MEDIA_DIR)
INSTALLED_APPS = (
'hydeengine',
'django.contrib.webdesign',
)
|
mit
|
brandonPurvis/osf.io
|
website/addons/badges/tests/test_views.py
|
33
|
14279
|
import mock
import random
import string
from nose.tools import *
import website.app
from webtest_plus import TestApp
from website.util import api_url_for, web_url_for
from website.addons.base.testing import AddonTestCase
from tests.factories import AuthUserFactory
from utils import create_mock_badger, create_badge_dict, get_garbage
class TestBadgesViews(AddonTestCase):
ADDON_SHORT_NAME = 'badges'
def setUp(self):
super(TestBadgesViews, self).setUp()
def set_node_settings(self, settings):
return settings
def set_user_settings(self, settings):
return create_mock_badger(settings)
def create_app(self):
return TestApp(app)
@mock.patch('website.addons.badges.model.badges.acquire_badge_image')
def test_create_badge(self, img_proc):
img_proc.return_value = 'temp.png'
badge = create_badge_dict()
ret = self.app.post_json(api_url_for('create_badge'), badge, auth=self.user.auth)
self.user_settings.reload()
assert_equals(ret.status_int, 201)
assert_equals(ret.content_type, 'application/json')
assert_true(ret.json['badgeid'] in [badge._id for badge in self.user_settings.badges])
@mock.patch('website.addons.badges.model.badges.acquire_badge_image')
def test_create_badge_no_data(self, img_proc):
url = api_url_for('create_badge')
badge = {}
ret = self.app.post_json(url, badge, auth=self.user.auth, expect_errors=True)
assert_equals(ret.status_int, 400)
@mock.patch('website.addons.badges.model.badges.acquire_badge_image')
def test_create_badge_some_data(self, img_proc):
img_proc.return_value = 'temp.png'
url = api_url_for('create_badge')
badge = {
'badgeName': ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(4)),
'description': 'Just doesn\'t '.join(random.choice(string.ascii_letters + string.digits) for _ in range(6))
}
ret = self.app.post_json(url, badge, auth=self.user.auth, expect_errors=True)
assert_equals(ret.status_int, 400)
@mock.patch('website.addons.badges.model.badges.acquire_badge_image')
def test_create_badge_empty_data(self, img_proc):
img_proc.return_value = 'temp.png'
url = api_url_for('create_badge')
badge = create_badge_dict()
badge['imageurl'] = ''
ret = self.app.post_json(url, badge, auth=self.user.auth, expect_errors=True)
assert_equals(ret.status_int, 400)
@mock.patch('website.addons.badges.model.badges.acquire_badge_image')
def test_create_badge_cant_issue(self, img_proc):
img_proc.return_value = 'temp.png'
self.user.delete_addon('badges')
url = api_url_for('create_badge')
badge = create_badge_dict()
ret = self.app.post_json(url, badge, auth=self.user.auth, expect_errors=True)
assert_equals(ret.status_int, 400)
def test_award_badge(self):
badgeid = self.user_settings.badges[0]._id
initnum = len(self.project.badgeassertion__awarded)
assert_true(self.user_settings.can_award)
url = api_url_for('award_badge', pid=self.project._id)
ret = self.app.post_json(url, {'badgeid': badgeid}, auth=self.user.auth)
self.project.reload()
assert_equals(ret.status_int, 200)
assert_equals(initnum + 1, len(self.project.badgeassertion__awarded))
def test_award_badge_bad_badge_id(self):
badgeid = 'badid67'
assert_true(self.user_settings.can_award)
url = api_url_for('award_badge', pid=self.project._id)
ret = self.app.post_json(url, {'badgeid': badgeid}, auth=self.user.auth, expect_errors=True)
assert_equals(ret.status_int, 400)
def test_award_badge_empty_badge_id(self):
assert_true(self.user_settings.can_award)
url = api_url_for('award_badge', pid=self.project._id)
ret = self.app.post_json(url, {'badgeid': ''}, auth=self.user.auth, expect_errors=True)
assert_equals(ret.status_int, 400)
def test_award_badge_no_badge_id(self):
assert_true(self.user_settings.can_award)
url = api_url_for('award_badge', pid=self.project._id)
ret = self.app.post_json(url, {}, auth=self.user.auth, expect_errors=True)
assert_equals(ret.status_int, 400)
@mock.patch('website.addons.badges.model.badges.acquire_badge_image')
def test_badge_html(self, img_proc):
img_proc.return_value = 'temp.png'
badge = {
'badgeName': get_garbage(),
'description': get_garbage(),
'imageurl': get_garbage(),
'criteria': get_garbage()
}
ret = self.app.post_json(api_url_for('create_badge'), badge, auth=self.user.auth)
self.user_settings.reload()
assert_equals(ret.status_int, 201)
assert_equals(ret.content_type, 'application/json')
assert_true(ret.json['badgeid'] in [badge._id for badge in self.user_settings.badges])
with self.app.app.test_request_context():
bstr = str(self.user_settings.badges[0].to_openbadge())
assert_false('>' in bstr)
assert_false('<' in bstr)
def test_revoke_badge(self):
badgeid = self.user_settings.badges[0]._id
initnum = len(self.project.badgeassertion__awarded)
assert_true(self.user_settings.can_award)
url = api_url_for('award_badge', pid=self.project._id)
ret = self.app.post_json(url, {'badgeid': badgeid}, auth=self.user.auth)
self.project.reload()
assert_equals(ret.status_int, 200)
assert_equals(initnum + 1, len(self.project.badgeassertion__awarded))
assertion = self.project.badgeassertion__awarded[0]
revoke = api_url_for('revoke_badge', pid=self.project._id)
ret = self.app.post_json(revoke,
{
'id': assertion._id,
'reason': ''
}, auth=self.user.auth)
self.project.reload()
self.user_settings.reload()
assertion.reload()
assert_equals(ret.status_int, 200)
assert_equals(self.project.badgeassertion__awarded[0]._id, assertion._id)
assert_true(assertion.revoked)
assert_true(assertion._id in self.user_settings.revocation_list)
assert_equals(len(self.user_settings.revocation_list), 1)
def test_revoke_badge_reason(self):
badgeid = self.user_settings.badges[0]._id
initnum = len(self.project.badgeassertion__awarded)
assert_true(self.user_settings.can_award)
url = api_url_for('award_badge', pid=self.project._id)
ret = self.app.post_json(url, {'badgeid': badgeid}, auth=self.user.auth)
self.project.reload()
assert_equals(ret.status_int, 200)
assert_equals(initnum + 1, len(self.project.badgeassertion__awarded))
assertion = self.project.badgeassertion__awarded[0]
revoke = api_url_for('revoke_badge', pid=self.project._id)
ret = self.app.post_json(revoke,
{
'id': assertion._id,
'reason': 'Is a loser'
}, auth=self.user.auth)
self.project.reload()
self.user_settings.reload()
assertion.reload()
assert_equals(ret.status_int, 200)
assert_equals(self.project.badgeassertion__awarded[0]._id, assertion._id)
assert_true(assertion._id in self.user_settings.revocation_list)
assert_equals(len(self.user_settings.revocation_list), 1)
assert_true(assertion.revoked)
assert_equals(self.user_settings.revocation_list[assertion._id], 'Is a loser')
def test_revoke_badge_no_addon(self):
badgeid = self.user_settings.badges[0]._id
initnum = len(self.project.badgeassertion__awarded)
assert_true(self.user_settings.can_award)
url = api_url_for('award_badge', pid=self.project._id)
ret = self.app.post_json(url, {'badgeid': badgeid}, auth=self.user.auth)
self.project.reload()
assert_equals(ret.status_int, 200)
assert_equals(initnum + 1, len(self.project.badgeassertion__awarded))
assertion = self.project.badgeassertion__awarded[0]
revoke = api_url_for('revoke_badge', pid=self.project._id)
self.user.delete_addon('badges')
self.user.save()
self.user.reload()
ret = self.app.post_json(revoke,
{
'id': assertion._id,
'reason': ''
}, auth=self.user.auth, expect_errors=True)
self.project.reload()
self.user_settings.reload()
assertion.reload()
assert_equals(ret.status_int, 400)
assert_false(assertion.revoked)
assert_equals(self.project.badgeassertion__awarded[0]._id, assertion._id)
assert_false(assertion._id in self.user_settings.revocation_list)
def test_revoke_didnt_award(self):
badgeid = self.user_settings.badges[0]._id
initnum = len(self.project.badgeassertion__awarded)
assert_true(self.user_settings.can_award)
url = api_url_for('award_badge', pid=self.project._id)
ret = self.app.post_json(url, {'badgeid': badgeid}, auth=self.user.auth)
self.project.reload()
assert_equals(ret.status_int, 200)
assert_equals(initnum + 1, len(self.project.badgeassertion__awarded))
assertion = self.project.badgeassertion__awarded[0]
revoke = api_url_for('revoke_badge', pid=self.project._id)
user2 = AuthUserFactory()
user2.add_addon('badges', override=True)
user2.save()
user2.reload()
ret = self.app.post_json(revoke,
{
'id': assertion._id,
'reason': ''
}, auth=user2.auth, expect_errors=True)
self.project.reload()
self.user_settings.reload()
assertion.reload()
assert_equals(ret.status_int, 400)
assert_false(assertion.revoked)
assert_equals(self.project.badgeassertion__awarded[0]._id, assertion._id)
assert_false(assertion._id in self.user_settings.revocation_list)
def test_issuer_html(self):
pass
def test_revoke_bad_aid(self):
badgeid = self.user_settings.badges[0]._id
initnum = len(self.project.badgeassertion__awarded)
assert_true(self.user_settings.can_award)
url = api_url_for('award_badge', pid=self.project._id)
ret = self.app.post_json(url, {'badgeid': badgeid}, auth=self.user.auth)
self.project.reload()
assert_equals(ret.status_int, 200)
assert_equals(initnum + 1, len(self.project.badgeassertion__awarded))
assertion = self.project.badgeassertion__awarded[0]
revoke = api_url_for('revoke_badge', pid=self.project._id)
ret = self.app.post_json(revoke,
{
'id': 'Im a bad id :D',
'reason': ''
}, auth=self.user.auth, expect_errors=True)
self.project.reload()
self.user_settings.reload()
assertion.reload()
assert_equals(ret.status_int, 400)
assert_false(assertion.revoked)
assert_equals(self.project.badgeassertion__awarded[0]._id, assertion._id)
assert_false(assertion._id in self.user_settings.revocation_list)
def test_system_badge_awarder(self):
badgeid = self.user_settings.badges[0]._id
self.user_settings.badges[0].make_system_badge()
initnum = len(self.project.badgeassertion__awarded)
assert_true(self.user_settings.can_award)
url = api_url_for('award_badge', pid=self.project._id)
ret = self.app.post_json(url, {'badgeid': badgeid}, auth=self.user.auth)
self.project.reload()
assert_equals(ret.status_int, 200)
assert_equals(initnum + 1, len(self.project.badgeassertion__awarded))
assertion = self.project.badgeassertion__awarded[0]
assert_equals(assertion.awarder._id, self.user_settings._id)
def test_badge_awarder(self):
badgeid = self.user_settings.badges[0]._id
initnum = len(self.project.badgeassertion__awarded)
assert_true(self.user_settings.can_award)
url = api_url_for('award_badge', pid=self.project._id)
ret = self.app.post_json(url, {'badgeid': badgeid}, auth=self.user.auth)
self.project.reload()
assert_equals(ret.status_int, 200)
assert_equals(initnum + 1, len(self.project.badgeassertion__awarded))
assertion = self.project.badgeassertion__awarded[0]
assert_equals(assertion.awarder._id, self.user_settings._id)
def test_award_times(self):
badge = self.user_settings.badges[0]
assert_true(self.user_settings.can_award)
url = api_url_for('award_badge', pid=self.project._id)
ret = self.app.post_json(url, {'badgeid': badge._id}, auth=self.user.auth)
ret = self.app.post_json(url, {'badgeid': badge._id}, auth=self.user.auth)
ret = self.app.post_json(url, {'badgeid': badge._id}, auth=self.user.auth)
self.project.reload()
assert_equals(ret.status_int, 200)
badge.reload()
assert_equals(badge.awarded_count, 3)
ret = self.app.post_json(url, {'badgeid': badge._id}, auth=self.user.auth)
ret = self.app.post_json(url, {'badgeid': badge._id}, auth=self.user.auth)
badge.reload()
assert_equals(badge.awarded_count, 5)
def test_unique_awards(self):
badge = self.user_settings.badges[0]
assert_true(self.user_settings.can_award)
url = api_url_for('award_badge', pid=self.project._id)
ret = self.app.post_json(url, {'badgeid': badge._id}, auth=self.user.auth)
ret = self.app.post_json(url, {'badgeid': badge._id}, auth=self.user.auth)
ret = self.app.post_json(url, {'badgeid': badge._id}, auth=self.user.auth)
self.project.reload()
assert_equals(ret.status_int, 200)
badge.reload()
assert_equals(badge.unique_awards_count, 1)
ret = self.app.post_json(url, {'badgeid': badge._id}, auth=self.user.auth)
ret = self.app.post_json(url, {'badgeid': badge._id}, auth=self.user.auth)
badge.reload()
assert_equals(badge.unique_awards_count, 1)
|
apache-2.0
|
tbeadle/django
|
django/db/migrations/serializer.py
|
49
|
14738
|
from __future__ import unicode_literals
import collections
import datetime
import decimal
import functools
import math
import types
from importlib import import_module
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject
from django.utils import datetime_safe, six
from django.utils.encoding import force_text
from django.utils.functional import LazyObject, Promise
from django.utils.timezone import utc
from django.utils.version import get_docs_version
try:
import enum
except ImportError:
# No support on Python 2 if enum34 isn't installed.
enum = None
class BaseSerializer(object):
def __init__(self, value):
self.value = value
def serialize(self):
raise NotImplementedError('Subclasses of BaseSerializer must implement the serialize() method.')
class BaseSequenceSerializer(BaseSerializer):
def _format(self):
raise NotImplementedError('Subclasses of BaseSequenceSerializer must implement the _format() method.')
def serialize(self):
imports = set()
strings = []
for item in self.value:
item_string, item_imports = serializer_factory(item).serialize()
imports.update(item_imports)
strings.append(item_string)
value = self._format()
return value % (", ".join(strings)), imports
class BaseSimpleSerializer(BaseSerializer):
def serialize(self):
return repr(self.value), set()
class ByteTypeSerializer(BaseSerializer):
def serialize(self):
value_repr = repr(self.value)
if six.PY2:
# Prepend the `b` prefix since we're importing unicode_literals
value_repr = 'b' + value_repr
return value_repr, set()
class DatetimeSerializer(BaseSerializer):
def serialize(self):
if self.value.tzinfo is not None and self.value.tzinfo != utc:
self.value = self.value.astimezone(utc)
value_repr = repr(self.value).replace("<UTC>", "utc")
if isinstance(self.value, datetime_safe.datetime):
value_repr = "datetime.%s" % value_repr
imports = ["import datetime"]
if self.value.tzinfo is not None:
imports.append("from django.utils.timezone import utc")
return value_repr, set(imports)
class DateSerializer(BaseSerializer):
def serialize(self):
value_repr = repr(self.value)
if isinstance(self.value, datetime_safe.date):
value_repr = "datetime.%s" % value_repr
return value_repr, {"import datetime"}
class DecimalSerializer(BaseSerializer):
def serialize(self):
return repr(self.value), {"from decimal import Decimal"}
class DeconstructableSerializer(BaseSerializer):
@staticmethod
def serialize_deconstructed(path, args, kwargs):
name, imports = DeconstructableSerializer._serialize_path(path)
strings = []
for arg in args:
arg_string, arg_imports = serializer_factory(arg).serialize()
strings.append(arg_string)
imports.update(arg_imports)
for kw, arg in sorted(kwargs.items()):
arg_string, arg_imports = serializer_factory(arg).serialize()
imports.update(arg_imports)
strings.append("%s=%s" % (kw, arg_string))
return "%s(%s)" % (name, ", ".join(strings)), imports
@staticmethod
def _serialize_path(path):
module, name = path.rsplit(".", 1)
if module == "django.db.models":
imports = {"from django.db import models"}
name = "models.%s" % name
else:
imports = {"import %s" % module}
name = path
return name, imports
def serialize(self):
return self.serialize_deconstructed(*self.value.deconstruct())
class DictionarySerializer(BaseSerializer):
def serialize(self):
imports = set()
strings = []
for k, v in sorted(self.value.items()):
k_string, k_imports = serializer_factory(k).serialize()
v_string, v_imports = serializer_factory(v).serialize()
imports.update(k_imports)
imports.update(v_imports)
strings.append((k_string, v_string))
return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports
class EnumSerializer(BaseSerializer):
def serialize(self):
enum_class = self.value.__class__
module = enum_class.__module__
imports = {"import %s" % module}
v_string, v_imports = serializer_factory(self.value.value).serialize()
imports.update(v_imports)
return "%s.%s(%s)" % (module, enum_class.__name__, v_string), imports
class FloatSerializer(BaseSimpleSerializer):
def serialize(self):
if math.isnan(self.value) or math.isinf(self.value):
return 'float("{}")'.format(self.value), set()
return super(FloatSerializer, self).serialize()
class FrozensetSerializer(BaseSequenceSerializer):
def _format(self):
return "frozenset([%s])"
class FunctionTypeSerializer(BaseSerializer):
def serialize(self):
if getattr(self.value, "__self__", None) and isinstance(self.value.__self__, type):
klass = self.value.__self__
module = klass.__module__
return "%s.%s.%s" % (module, klass.__name__, self.value.__name__), {"import %s" % module}
# Further error checking
if self.value.__name__ == '<lambda>':
raise ValueError("Cannot serialize function: lambda")
if self.value.__module__ is None:
raise ValueError("Cannot serialize function %r: No module" % self.value)
# Python 3 is a lot easier, and only uses this branch if it's not local.
if getattr(self.value, "__qualname__", None) and getattr(self.value, "__module__", None):
if "<" not in self.value.__qualname__: # Qualname can include <locals>
return "%s.%s" % \
(self.value.__module__, self.value.__qualname__), {"import %s" % self.value.__module__}
# Python 2/fallback version
module_name = self.value.__module__
# Make sure it's actually there and not an unbound method
module = import_module(module_name)
if not hasattr(module, self.value.__name__):
raise ValueError(
"Could not find function %s in %s.\n"
"Please note that due to Python 2 limitations, you cannot "
"serialize unbound method functions (e.g. a method "
"declared and used in the same class body). Please move "
"the function into the main module body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
% (self.value.__name__, module_name, get_docs_version())
)
# Needed on Python 2 only
if module_name == '__builtin__':
return self.value.__name__, set()
return "%s.%s" % (module_name, self.value.__name__), {"import %s" % module_name}
class FunctoolsPartialSerializer(BaseSerializer):
def serialize(self):
imports = {'import functools'}
# Serialize functools.partial() arguments
func_string, func_imports = serializer_factory(self.value.func).serialize()
args_string, args_imports = serializer_factory(self.value.args).serialize()
keywords_string, keywords_imports = serializer_factory(self.value.keywords).serialize()
# Add any imports needed by arguments
imports.update(func_imports)
imports.update(args_imports)
imports.update(keywords_imports)
return (
"functools.partial(%s, *%s, **%s)" % (
func_string, args_string, keywords_string,
),
imports,
)
class IterableSerializer(BaseSerializer):
def serialize(self):
imports = set()
strings = []
for item in self.value:
item_string, item_imports = serializer_factory(item).serialize()
imports.update(item_imports)
strings.append(item_string)
# When len(strings)==0, the empty iterable should be serialized as
# "()", not "(,)" because (,) is invalid Python syntax.
value = "(%s)" if len(strings) != 1 else "(%s,)"
return value % (", ".join(strings)), imports
class ModelFieldSerializer(DeconstructableSerializer):
def serialize(self):
attr_name, path, args, kwargs = self.value.deconstruct()
return self.serialize_deconstructed(path, args, kwargs)
class ModelManagerSerializer(DeconstructableSerializer):
def serialize(self):
as_manager, manager_path, qs_path, args, kwargs = self.value.deconstruct()
if as_manager:
name, imports = self._serialize_path(qs_path)
return "%s.as_manager()" % name, imports
else:
return self.serialize_deconstructed(manager_path, args, kwargs)
class OperationSerializer(BaseSerializer):
def serialize(self):
from django.db.migrations.writer import OperationWriter
string, imports = OperationWriter(self.value, indentation=0).serialize()
# Nested operation, trailing comma is handled in upper OperationWriter._write()
return string.rstrip(','), imports
class RegexSerializer(BaseSerializer):
def serialize(self):
imports = {"import re"}
regex_pattern, pattern_imports = serializer_factory(self.value.pattern).serialize()
regex_flags, flag_imports = serializer_factory(self.value.flags).serialize()
imports.update(pattern_imports)
imports.update(flag_imports)
args = [regex_pattern]
if self.value.flags:
args.append(regex_flags)
return "re.compile(%s)" % ', '.join(args), imports
class SequenceSerializer(BaseSequenceSerializer):
def _format(self):
return "[%s]"
class SetSerializer(BaseSequenceSerializer):
def _format(self):
# Don't use the literal "{%s}" as it doesn't support empty set
return "set([%s])"
class SettingsReferenceSerializer(BaseSerializer):
def serialize(self):
return "settings.%s" % self.value.setting_name, {"from django.conf import settings"}
class TextTypeSerializer(BaseSerializer):
def serialize(self):
value_repr = repr(self.value)
if six.PY2:
# Strip the `u` prefix since we're importing unicode_literals
value_repr = value_repr[1:]
return value_repr, set()
class TimedeltaSerializer(BaseSerializer):
def serialize(self):
return repr(self.value), {"import datetime"}
class TimeSerializer(BaseSerializer):
def serialize(self):
value_repr = repr(self.value)
if isinstance(self.value, datetime_safe.time):
value_repr = "datetime.%s" % value_repr
return value_repr, {"import datetime"}
class TupleSerializer(BaseSequenceSerializer):
def _format(self):
# When len(value)==0, the empty tuple should be serialized as "()",
# not "(,)" because (,) is invalid Python syntax.
return "(%s)" if len(self.value) != 1 else "(%s,)"
class TypeSerializer(BaseSerializer):
def serialize(self):
special_cases = [
(models.Model, "models.Model", []),
]
for case, string, imports in special_cases:
if case is self.value:
return string, set(imports)
if hasattr(self.value, "__module__"):
module = self.value.__module__
if module == six.moves.builtins.__name__:
return self.value.__name__, set()
else:
return "%s.%s" % (module, self.value.__name__), {"import %s" % module}
def serializer_factory(value):
from django.db.migrations.writer import SettingsReference
if isinstance(value, Promise):
value = force_text(value)
elif isinstance(value, LazyObject):
# The unwrapped value is returned as the first item of the arguments
# tuple.
value = value.__reduce__()[1][0]
if isinstance(value, models.Field):
return ModelFieldSerializer(value)
if isinstance(value, models.manager.BaseManager):
return ModelManagerSerializer(value)
if isinstance(value, Operation):
return OperationSerializer(value)
if isinstance(value, type):
return TypeSerializer(value)
# Anything that knows how to deconstruct itself.
if hasattr(value, 'deconstruct'):
return DeconstructableSerializer(value)
# Unfortunately some of these are order-dependent.
if isinstance(value, frozenset):
return FrozensetSerializer(value)
if isinstance(value, list):
return SequenceSerializer(value)
if isinstance(value, set):
return SetSerializer(value)
if isinstance(value, tuple):
return TupleSerializer(value)
if isinstance(value, dict):
return DictionarySerializer(value)
if enum and isinstance(value, enum.Enum):
return EnumSerializer(value)
if isinstance(value, datetime.datetime):
return DatetimeSerializer(value)
if isinstance(value, datetime.date):
return DateSerializer(value)
if isinstance(value, datetime.time):
return TimeSerializer(value)
if isinstance(value, datetime.timedelta):
return TimedeltaSerializer(value)
if isinstance(value, SettingsReference):
return SettingsReferenceSerializer(value)
if isinstance(value, float):
return FloatSerializer(value)
if isinstance(value, six.integer_types + (bool, type(None))):
return BaseSimpleSerializer(value)
if isinstance(value, six.binary_type):
return ByteTypeSerializer(value)
if isinstance(value, six.text_type):
return TextTypeSerializer(value)
if isinstance(value, decimal.Decimal):
return DecimalSerializer(value)
if isinstance(value, functools.partial):
return FunctoolsPartialSerializer(value)
if isinstance(value, (types.FunctionType, types.BuiltinFunctionType)):
return FunctionTypeSerializer(value)
if isinstance(value, collections.Iterable):
return IterableSerializer(value)
if isinstance(value, (COMPILED_REGEX_TYPE, RegexObject)):
return RegexSerializer(value)
raise ValueError(
"Cannot serialize: %r\nThere are some values Django cannot serialize into "
"migration files.\nFor more, see https://docs.djangoproject.com/en/%s/"
"topics/migrations/#migration-serializing" % (value, get_docs_version())
)
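# A hedged illustration (not part of Django's source): the migration writer
# pairs each value with a serializer via the factory above, e.g. a plain
# datetime.date round-trips to source text plus the imports it needs:
#   >>> serializer_factory(datetime.date(2016, 1, 1)).serialize()
#   ('datetime.date(2016, 1, 1)', {'import datetime'})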
|
bsd-3-clause
|
KaranToor/MA450
|
google-cloud-sdk/.install/.backup/lib/surface/debug/snapshots/delete.py
|
6
|
3109
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delete command for gcloud debug snapshots command group."""
import StringIO
from googlecloudsdk.api_lib.debug import debug
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.debug import flags
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.resource import resource_printer
class Delete(base.DeleteCommand):
"""Delete debug snapshots.
This command deletes snapshots from a Cloud Debugger debug target. It will
ask for confirmation before deleting any snapshots. To suppress confirmation,
use the global --quiet option.
"""
@staticmethod
def Args(parser):
flags.AddIdOptions(parser, 'snapshot', 'snapshots', 'deleted')
parser.add_argument(
'--all-users', action='store_true', default=False,
help="""\
If set, matching snapshots from all users will be deleted, rather
than only snapshots created by the current user.
""")
parser.add_argument(
'--include-inactive', action='store_true', default=False,
help="""\
If set, also delete snapshots which have been completed. By default,
only pending snapshots will be deleted.
""")
def Run(self, args):
"""Run the delete command."""
project_id = properties.VALUES.core.project.Get(required=True)
debugger = debug.Debugger(project_id)
debuggee = debugger.FindDebuggee(args.target)
snapshots = debuggee.ListBreakpoints(
args.location, resource_ids=args.ids,
include_all_users=args.all_users,
include_inactive=args.include_inactive,
restrict_to_type=debugger.SNAPSHOT_TYPE)
if snapshots:
snapshot_list = StringIO.StringIO()
resource_printer.Print(
snapshots, 'table(location, condition, id)', snapshot_list)
console_io.PromptContinue(
message=(
'This command will delete the following snapshots:'
'\n\n{0}\n'.format(snapshot_list.getvalue())),
cancel_on_no=True)
for s in snapshots:
debuggee.DeleteBreakpoint(s.id)
# Guaranteed we have at least one snapshot, since ListMatchingBreakpoints
# would raise an exception otherwise.
if len(snapshots) == 1:
log.status.write('Deleted 1 snapshot.\n')
else:
log.status.write('Deleted {0} snapshots.\n'.format(len(snapshots)))
return snapshots
def Collection(self):
return 'debug.snapshots'
|
apache-2.0
|
gcoop-libre/ansible-lookup-plugin-pass
|
lookup_plugins/pass.py
|
1
|
5820
|
#
# This script comes with ABSOLUTELY NO WARRANTY, use at own risk
# Copyright (C) 2017 mpv <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible lookup plugin for zx2c4's password manager, pass [0].
#
# To change the default password store set ANSIBLE_PASS_PASSWORD_STORE_DIR
# environment variable.
#
# If the pass doesn't exist in the store it's generated. It accepts extra
# parameters: length, symbols and regenerate (if symbols is False or no, -n is
# appended to the pass generate command so the password contains no symbols).
#
# example: {{ lookup('pass', 'path/to/site length=20 symbols=False') }}
#
# [0] https://www.passwordstore.org/
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
import subprocess
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.parsing.splitter import parse_kv
PASSWORD_STORE_DIR = '~/.password-store'
if os.getenv('ANSIBLE_PASS_PASSWORD_STORE_DIR') is not None:
PASSWORD_STORE_DIR = os.environ['ANSIBLE_PASS_PASSWORD_STORE_DIR']
PASS_EXEC = 'PASSWORD_STORE_DIR=%s pass' % PASSWORD_STORE_DIR
DEFAULT_LENGTH = 32
VALID_PARAMS = frozenset(('length', 'symbols', 'regenerate'))
def _parse_parameters(term):
# Hacky parsing of params taken from password lookup.
first_split = term.split(' ', 1)
if len(first_split) <= 1:
# Only a single argument given, therefore it's a path
name = term
params = dict()
else:
name = first_split[0]
params = parse_kv(first_split[1])
if '_raw_params' in params:
# Spaces in the path?
name = ' '.join((name, params['_raw_params']))
del params['_raw_params']
# Check that we parsed the params correctly
if not term.startswith(name):
# Likely, the user had a non parameter following a parameter.
# Reject this as a user typo
raise AnsibleError('Unrecognized value after key=value parameters given to password lookup')
# No _raw_params means we already found the complete path when
# we split it initially
# Check for invalid parameters. Probably a user typo
invalid_params = frozenset(params.keys()).difference(VALID_PARAMS)
if invalid_params:
raise AnsibleError('Unrecognized parameter(s) given to password lookup: %s' % ', '.join(invalid_params))
# Set defaults
params['length'] = int(params.get('length', DEFAULT_LENGTH))
symbols = params.get('symbols', 'False')
if symbols.lower() in ['true', 'yes']:
params['symbols'] = True
else:
params['symbols'] = False
regenerate = params.get('regenerate', 'False')
if regenerate.lower() in ['true', 'yes']:
params['regenerate'] = True
else:
params['regenerate'] = False
return name, params
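# A hedged worked example (made-up path): a term of the form shown in the
# header comment above parses into a path plus typed parameters:
#   >>> _parse_parameters('path/to/site length=20 symbols=True')
#   ('path/to/site', {'length': 20, 'symbols': True, 'regenerate': False})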
def get_password(path):
"""Get password from pass."""
command = '%s show %s' % (PASS_EXEC, path)
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
return stdout.splitlines()[0].decode('utf-8')
raise Exception(stderr)
def generate_password(path, length, symbols, force=False):
"""Generate password using pass."""
command = '%s generate %s %s' % (PASS_EXEC, path, length)
display.vvv('COMMAND: %s' % command)
if not symbols:
command = command + ' -n'
if force:
command = command + ' -f'
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
raise Exception(stderr)
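# A hedged illustration of the shell command built above (default store
# location assumed): generate_password('web/example.com', 32, False) runs
#   PASSWORD_STORE_DIR=~/.password-store pass generate web/example.com 32 -n
# while symbols=True drops the trailing -n and force=True appends -f.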
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
'''
http://docs.python.org/2/library/subprocess.html#popen-constructor
The shell argument (which defaults to False) specifies whether to use the
shell as the program to execute. If shell is True, it is recommended to pass
args as a string rather than as a sequence
https://github.com/ansible/ansible/issues/6550
'''
name, params = _parse_parameters(term)
if params['regenerate']:
try:
generate_password(name, params['length'], params['symbols'], True)
display.vvv('Generated password for %s' % name)
except Exception as e:
raise AnsibleError("lookup_plugin.pass(%s) returned %s" % (term, e.message))
try:
password = get_password(term)
except:
try:
generate_password(name, params['length'], params['symbols'])
display.vvv('Generated password for %s' % name)
password = get_password(name)
except Exception as e:
raise AnsibleError("lookup_plugin.pass(%s) returned %s" % (term, e.message))
ret.append(password)
return ret
|
gpl-3.0
|
suyashphadtare/vestasi-erp-final
|
erpnext/accounts/doctype/purchase_invoice/test_purchase_invoice.py
|
25
|
8458
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
import frappe.model
import json
from frappe.utils import cint
import frappe.defaults
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory, \
test_records as pr_test_records
test_dependencies = ["Item", "Cost Center"]
test_ignore = ["Serial No"]
class TestPurchaseInvoice(unittest.TestCase):
def test_gl_entries_without_auto_accounting_for_stock(self):
set_perpetual_inventory(0)
self.assertTrue(not cint(frappe.defaults.get_global_default("auto_accounting_for_stock")))
wrapper = frappe.copy_doc(test_records[0])
wrapper.insert()
wrapper.submit()
wrapper.load_from_db()
dl = wrapper
expected_gl_entries = {
"_Test Supplier - _TC": [0, 1512.30],
"_Test Account Cost for Goods Sold - _TC": [1250, 0],
"_Test Account Shipping Charges - _TC": [100, 0],
"_Test Account Excise Duty - _TC": [140, 0],
"_Test Account Education Cess - _TC": [2.8, 0],
"_Test Account S&H Education Cess - _TC": [1.4, 0],
"_Test Account CST - _TC": [29.88, 0],
"_Test Account VAT - _TC": [156.25, 0],
"_Test Account Discount - _TC": [0, 168.03],
}
gl_entries = frappe.db.sql("""select account, debit, credit from `tabGL Entry`
where voucher_type = 'Purchase Invoice' and voucher_no = %s""", dl.name, as_dict=1)
for d in gl_entries:
self.assertEqual([d.debit, d.credit], expected_gl_entries.get(d.account))
def test_gl_entries_with_auto_accounting_for_stock(self):
set_perpetual_inventory(1)
self.assertEqual(cint(frappe.defaults.get_global_default("auto_accounting_for_stock")), 1)
pi = frappe.copy_doc(test_records[1])
pi.insert()
pi.submit()
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type='Purchase Invoice' and voucher_no=%s
order by account asc""", pi.name, as_dict=1)
self.assertTrue(gl_entries)
expected_values = sorted([
["_Test Supplier - _TC", 0, 720],
["Stock Received But Not Billed - _TC", 750.0, 0],
["Expenses Included In Valuation - _TC", 0.0, 250.0],
["_Test Account Shipping Charges - _TC", 100.0, 0],
["_Test Account VAT - _TC", 120.0, 0],
])
for i, gle in enumerate(gl_entries):
self.assertEquals(expected_values[i][0], gle.account)
self.assertEquals(expected_values[i][1], gle.debit)
self.assertEquals(expected_values[i][2], gle.credit)
set_perpetual_inventory(0)
def test_gl_entries_with_auto_accounting_for_stock_against_pr(self):
set_perpetual_inventory(1)
self.assertEqual(cint(frappe.defaults.get_global_default("auto_accounting_for_stock")), 1)
pr = frappe.copy_doc(pr_test_records[0])
pr.submit()
pi = frappe.copy_doc(test_records[1])
for d in pi.get("entries"):
d.purchase_receipt = pr.name
pi.insert()
pi.submit()
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type='Purchase Invoice' and voucher_no=%s
order by account asc""", pi.name, as_dict=1)
self.assertTrue(gl_entries)
expected_values = sorted([
["_Test Supplier - _TC", 0, 720],
["Stock Received But Not Billed - _TC", 500.0, 0],
["_Test Account Shipping Charges - _TC", 100.0, 0],
["_Test Account VAT - _TC", 120.0, 0],
])
for i, gle in enumerate(gl_entries):
self.assertEquals(expected_values[i][0], gle.account)
self.assertEquals(expected_values[i][1], gle.debit)
self.assertEquals(expected_values[i][2], gle.credit)
set_perpetual_inventory(0)
def test_gl_entries_with_aia_for_non_stock_items(self):
set_perpetual_inventory()
self.assertEqual(cint(frappe.defaults.get_global_default("auto_accounting_for_stock")), 1)
pi = frappe.copy_doc(test_records[1])
pi.get("entries")[0].item_code = "_Test Non Stock Item"
pi.get("entries")[0].expense_account = "_Test Account Cost for Goods Sold - _TC"
pi.get("other_charges").pop(0)
pi.get("other_charges").pop(1)
pi.insert()
pi.submit()
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type='Purchase Invoice' and voucher_no=%s
order by account asc""", pi.name, as_dict=1)
self.assertTrue(gl_entries)
expected_values = sorted([
["_Test Supplier - _TC", 0, 620],
["_Test Account Cost for Goods Sold - _TC", 500.0, 0],
["_Test Account VAT - _TC", 120.0, 0],
])
for i, gle in enumerate(gl_entries):
self.assertEquals(expected_values[i][0], gle.account)
self.assertEquals(expected_values[i][1], gle.debit)
self.assertEquals(expected_values[i][2], gle.credit)
set_perpetual_inventory(0)
def test_purchase_invoice_calculation(self):
wrapper = frappe.copy_doc(test_records[0])
wrapper.insert()
wrapper.load_from_db()
expected_values = [
["_Test Item Home Desktop 100", 90, 59],
["_Test Item Home Desktop 200", 135, 177]
]
for i, item in enumerate(wrapper.get("entries")):
self.assertEqual(item.item_code, expected_values[i][0])
self.assertEqual(item.item_tax_amount, expected_values[i][1])
self.assertEqual(item.valuation_rate, expected_values[i][2])
self.assertEqual(wrapper.net_total, 1250)
# tax amounts
expected_values = [
["_Test Account Shipping Charges - _TC", 100, 1350],
["_Test Account Customs Duty - _TC", 125, 1350],
["_Test Account Excise Duty - _TC", 140, 1490],
["_Test Account Education Cess - _TC", 2.8, 1492.8],
["_Test Account S&H Education Cess - _TC", 1.4, 1494.2],
["_Test Account CST - _TC", 29.88, 1524.08],
["_Test Account VAT - _TC", 156.25, 1680.33],
["_Test Account Discount - _TC", 168.03, 1512.30],
]
for i, tax in enumerate(wrapper.get("other_charges")):
self.assertEqual(tax.account_head, expected_values[i][0])
self.assertEqual(tax.tax_amount, expected_values[i][1])
self.assertEqual(tax.total, expected_values[i][2])
def test_purchase_invoice_with_subcontracted_item(self):
wrapper = frappe.copy_doc(test_records[0])
wrapper.get("entries")[0].item_code = "_Test FG Item"
wrapper.insert()
wrapper.load_from_db()
expected_values = [
["_Test FG Item", 90, 59],
["_Test Item Home Desktop 200", 135, 177]
]
for i, item in enumerate(wrapper.get("entries")):
self.assertEqual(item.item_code, expected_values[i][0])
self.assertEqual(item.item_tax_amount, expected_values[i][1])
self.assertEqual(item.valuation_rate, expected_values[i][2])
self.assertEqual(wrapper.net_total, 1250)
# tax amounts
expected_values = [
["_Test Account Shipping Charges - _TC", 100, 1350],
["_Test Account Customs Duty - _TC", 125, 1350],
["_Test Account Excise Duty - _TC", 140, 1490],
["_Test Account Education Cess - _TC", 2.8, 1492.8],
["_Test Account S&H Education Cess - _TC", 1.4, 1494.2],
["_Test Account CST - _TC", 29.88, 1524.08],
["_Test Account VAT - _TC", 156.25, 1680.33],
["_Test Account Discount - _TC", 168.03, 1512.30],
]
for i, tax in enumerate(wrapper.get("other_charges")):
self.assertEqual(tax.account_head, expected_values[i][0])
self.assertEqual(tax.tax_amount, expected_values[i][1])
self.assertEqual(tax.total, expected_values[i][2])
def test_purchase_invoice_with_advance(self):
from erpnext.accounts.doctype.journal_voucher.test_journal_voucher \
import test_records as jv_test_records
jv = frappe.copy_doc(jv_test_records[1])
jv.insert()
jv.submit()
pi = frappe.copy_doc(test_records[0])
pi.append("advance_allocation_details", {
"journal_voucher": jv.name,
"jv_detail_no": jv.get("entries")[0].name,
"advance_amount": 400,
"allocated_amount": 300,
"remarks": jv.remark
})
pi.insert()
pi.submit()
pi.load_from_db()
self.assertTrue(frappe.db.sql("""select name from `tabJournal Voucher Detail`
where against_voucher=%s""", pi.name))
self.assertTrue(frappe.db.sql("""select name from `tabJournal Voucher Detail`
where against_voucher=%s and debit=300""", pi.name))
self.assertEqual(pi.outstanding_amount, 1212.30)
pi.cancel()
self.assertTrue(not frappe.db.sql("""select name from `tabJournal Voucher Detail`
where against_voucher=%s""", pi.name))
def test_recurring_invoice(self):
from erpnext.controllers.tests.test_recurring_document import test_recurring_document
test_recurring_document(self, test_records)
test_records = frappe.get_test_records('Purchase Invoice')
|
agpl-3.0
|
collbb/ThinkStats2
|
code/first.py
|
79
|
4169
|
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import numpy as np
import nsfg
import thinkstats2
import thinkplot
def MakeFrames():
"""Reads pregnancy data and partitions first babies and others.
returns: DataFrames (all live births, first babies, others)
"""
preg = nsfg.ReadFemPreg()
live = preg[preg.outcome == 1]
firsts = live[live.birthord == 1]
others = live[live.birthord != 1]
assert len(live) == 9148
assert len(firsts) == 4413
assert len(others) == 4735
return live, firsts, others
def Summarize(live, firsts, others):
"""Print various summary statistics."""
mean = live.prglngth.mean()
var = live.prglngth.var()
std = live.prglngth.std()
print('Live mean', mean)
print('Live variance', var)
print('Live std', std)
mean1 = firsts.prglngth.mean()
mean2 = others.prglngth.mean()
var1 = firsts.prglngth.var()
var2 = others.prglngth.var()
print('Mean')
print('First babies', mean1)
print('Others', mean2)
print('Variance')
print('First babies', var1)
print('Others', var2)
print('Difference in weeks', mean1 - mean2)
print('Difference in hours', (mean1 - mean2) * 7 * 24)
print('Difference relative to 39 weeks', (mean1 - mean2) / 39 * 100)
d = thinkstats2.CohenEffectSize(firsts.prglngth, others.prglngth)
print('Cohen d', d)
def PrintExtremes(live):
"""Plots the histogram of pregnancy lengths and prints the extremes.
live: DataFrame of live births
"""
hist = thinkstats2.Hist(live.prglngth)
thinkplot.Hist(hist, label='live births')
thinkplot.Save(root='first_nsfg_hist_live',
title='Histogram',
xlabel='weeks',
ylabel='frequency')
print('Shortest lengths:')
for weeks, freq in hist.Smallest(10):
print(weeks, freq)
print('Longest lengths:')
for weeks, freq in hist.Largest(10):
print(weeks, freq)
def MakeHists(live):
"""Plot Hists for live births
live: DataFrame
others: DataFrame
"""
hist = thinkstats2.Hist(live.birthwgt_lb, label='birthwgt_lb')
thinkplot.Hist(hist)
thinkplot.Save(root='first_wgt_lb_hist',
xlabel='pounds',
ylabel='frequency',
axis=[-1, 14, 0, 3200])
hist = thinkstats2.Hist(live.birthwgt_oz, label='birthwgt_oz')
thinkplot.Hist(hist)
thinkplot.Save(root='first_wgt_oz_hist',
xlabel='ounces',
ylabel='frequency',
axis=[-1, 16, 0, 1200])
hist = thinkstats2.Hist(np.floor(live.agepreg), label='agepreg')
thinkplot.Hist(hist)
thinkplot.Save(root='first_agepreg_hist',
xlabel='years',
ylabel='frequency')
hist = thinkstats2.Hist(live.prglngth, label='prglngth')
thinkplot.Hist(hist)
thinkplot.Save(root='first_prglngth_hist',
xlabel='weeks',
ylabel='frequency',
axis=[-1, 53, 0, 5000])
def MakeComparison(firsts, others):
"""Plots histograms of pregnancy length for first babies and others.
firsts: DataFrame
others: DataFrame
"""
first_hist = thinkstats2.Hist(firsts.prglngth, label='first')
other_hist = thinkstats2.Hist(others.prglngth, label='other')
width = 0.45
thinkplot.PrePlot(2)
thinkplot.Hist(first_hist, align='right', width=width)
thinkplot.Hist(other_hist, align='left', width=width)
thinkplot.Save(root='first_nsfg_hist',
title='Histogram',
xlabel='weeks',
ylabel='frequency',
axis=[27, 46, 0, 2700])
def main(script):
live, firsts, others = MakeFrames()
MakeHists(live)
PrintExtremes(live)
MakeComparison(firsts, others)
Summarize(live, firsts, others)
if __name__ == '__main__':
import sys
main(*sys.argv)
|
gpl-3.0
|
shams169/pythonProject
|
env/lib/python3.6/site-packages/pip/_vendor/requests/packages/chardet/codingstatemachine.py
|
2931
|
2318
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
def __init__(self, sm):
self._mModel = sm
self._mCurrentBytePos = 0
self._mCurrentCharLen = 0
self.reset()
def reset(self):
self._mCurrentState = eStart
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
# PY3K: aBuf is a byte stream, so c is an int, not a byte
byteCls = self._mModel['classTable'][wrap_ord(c)]
if self._mCurrentState == eStart:
self._mCurrentBytePos = 0
self._mCurrentCharLen = self._mModel['charLenTable'][byteCls]
# from byte's class and stateTable, we get its next state
curr_state = (self._mCurrentState * self._mModel['classFactor']
+ byteCls)
self._mCurrentState = self._mModel['stateTable'][curr_state]
self._mCurrentBytePos += 1
return self._mCurrentState
def get_current_charlen(self):
return self._mCurrentCharLen
def get_coding_state_machine(self):
return self._mModel['name']
|
mit
|
vitan/hue
|
desktop/core/ext-py/Django-1.6.10/tests/null_fk/models.py
|
115
|
1316
|
"""
Regression tests for proper working of ForeignKey(null=True).
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class SystemDetails(models.Model):
details = models.TextField()
class SystemInfo(models.Model):
system_details = models.ForeignKey(SystemDetails)
system_name = models.CharField(max_length=32)
class Forum(models.Model):
system_info = models.ForeignKey(SystemInfo)
forum_name = models.CharField(max_length=32)
@python_2_unicode_compatible
class Post(models.Model):
forum = models.ForeignKey(Forum, null=True)
title = models.CharField(max_length=32)
def __str__(self):
return self.title
@python_2_unicode_compatible
class Comment(models.Model):
post = models.ForeignKey(Post, null=True)
comment_text = models.CharField(max_length=250)
class Meta:
ordering = ('comment_text',)
def __str__(self):
return self.comment_text
# Ticket 15823
class Item(models.Model):
title = models.CharField(max_length=100)
class PropertyValue(models.Model):
label = models.CharField(max_length=100)
class Property(models.Model):
item = models.ForeignKey(Item, related_name='props')
key = models.CharField(max_length=100)
value = models.ForeignKey(PropertyValue, null=True)
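# --- Editor's note: hypothetical usage sketch, not part of the regression tests. ---
# With null=True on Comment.post and Post.forum, select_related() has to use LEFT
# OUTER JOINs, and the related attributes simply come back as None when the key is
# NULL. The helper below is illustrative only and is never called at import time.
def _demo_null_fk_traversal():
    Comment.objects.create(post=None, comment_text='orphan comment')
    for comment in Comment.objects.select_related('post__forum'):
        post = comment.post
        forum_name = post.forum.forum_name if post is not None and post.forum is not None else None
        print((comment.comment_text, forum_name))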
|
apache-2.0
|
abloomston/sympy
|
sympy/polys/tests/test_polyoptions.py
|
59
|
11994
|
"""Tests for options manager for :class:`Poly` and public API functions. """
from sympy.polys.polyoptions import (
Options, Expand, Gens, Wrt, Sort, Order, Field, Greedy, Domain,
Split, Gaussian, Extension, Modulus, Symmetric, Strict, Auto,
Frac, Formal, Polys, Include, All, Gen, Symbols, Method)
from sympy.polys.orderings import lex
from sympy.polys.domains import FF, GF, ZZ, QQ, EX
from sympy.polys.polyerrors import OptionError, GeneratorsError
from sympy import Integer, Symbol, I, sqrt
from sympy.utilities.pytest import raises
from sympy.abc import x, y, z
def test_Options_clone():
opt = Options((x, y, z), {'domain': 'ZZ'})
assert opt.gens == (x, y, z)
assert opt.domain == ZZ
assert ('order' in opt) is False
new_opt = opt.clone({'gens': (x, y), 'order': 'lex'})
assert opt.gens == (x, y, z)
assert opt.domain == ZZ
assert ('order' in opt) is False
assert new_opt.gens == (x, y)
assert new_opt.domain == ZZ
assert ('order' in new_opt) is True
def test_Expand_preprocess():
assert Expand.preprocess(False) is False
assert Expand.preprocess(True) is True
assert Expand.preprocess(0) is False
assert Expand.preprocess(1) is True
raises(OptionError, lambda: Expand.preprocess(x))
def test_Expand_postprocess():
opt = {'expand': True}
Expand.postprocess(opt)
assert opt == {'expand': True}
def test_Gens_preprocess():
assert Gens.preprocess((None,)) == ()
assert Gens.preprocess((x, y, z)) == (x, y, z)
assert Gens.preprocess(((x, y, z),)) == (x, y, z)
a = Symbol('a', commutative=False)
raises(GeneratorsError, lambda: Gens.preprocess((x, x, y)))
raises(GeneratorsError, lambda: Gens.preprocess((x, y, a)))
def test_Gens_postprocess():
opt = {'gens': (x, y)}
Gens.postprocess(opt)
assert opt == {'gens': (x, y)}
def test_Wrt_preprocess():
assert Wrt.preprocess(x) == ['x']
assert Wrt.preprocess('') == []
assert Wrt.preprocess(' ') == []
assert Wrt.preprocess('x,y') == ['x', 'y']
assert Wrt.preprocess('x y') == ['x', 'y']
assert Wrt.preprocess('x, y') == ['x', 'y']
assert Wrt.preprocess('x , y') == ['x', 'y']
assert Wrt.preprocess(' x, y') == ['x', 'y']
assert Wrt.preprocess(' x, y') == ['x', 'y']
assert Wrt.preprocess([x, y]) == ['x', 'y']
raises(OptionError, lambda: Wrt.preprocess(','))
raises(OptionError, lambda: Wrt.preprocess(0))
def test_Wrt_postprocess():
opt = {'wrt': ['x']}
Wrt.postprocess(opt)
assert opt == {'wrt': ['x']}
def test_Sort_preprocess():
assert Sort.preprocess([x, y, z]) == ['x', 'y', 'z']
assert Sort.preprocess((x, y, z)) == ['x', 'y', 'z']
assert Sort.preprocess('x > y > z') == ['x', 'y', 'z']
assert Sort.preprocess('x>y>z') == ['x', 'y', 'z']
raises(OptionError, lambda: Sort.preprocess(0))
raises(OptionError, lambda: Sort.preprocess(set([x, y, z])))
def test_Sort_postprocess():
opt = {'sort': 'x > y'}
Sort.postprocess(opt)
assert opt == {'sort': 'x > y'}
def test_Order_preprocess():
assert Order.preprocess('lex') == lex
def test_Order_postprocess():
opt = {'order': True}
Order.postprocess(opt)
assert opt == {'order': True}
def test_Field_preprocess():
assert Field.preprocess(False) is False
assert Field.preprocess(True) is True
assert Field.preprocess(0) is False
assert Field.preprocess(1) is True
raises(OptionError, lambda: Field.preprocess(x))
def test_Field_postprocess():
opt = {'field': True}
Field.postprocess(opt)
assert opt == {'field': True}
def test_Greedy_preprocess():
assert Greedy.preprocess(False) is False
assert Greedy.preprocess(True) is True
assert Greedy.preprocess(0) is False
assert Greedy.preprocess(1) is True
raises(OptionError, lambda: Greedy.preprocess(x))
def test_Greedy_postprocess():
opt = {'greedy': True}
Greedy.postprocess(opt)
assert opt == {'greedy': True}
def test_Domain_preprocess():
assert Domain.preprocess(ZZ) == ZZ
assert Domain.preprocess(QQ) == QQ
assert Domain.preprocess(EX) == EX
assert Domain.preprocess(FF(2)) == FF(2)
assert Domain.preprocess(ZZ[x, y]) == ZZ[x, y]
assert Domain.preprocess('Z') == ZZ
assert Domain.preprocess('Q') == QQ
assert Domain.preprocess('ZZ') == ZZ
assert Domain.preprocess('QQ') == QQ
assert Domain.preprocess('EX') == EX
assert Domain.preprocess('FF(23)') == FF(23)
assert Domain.preprocess('GF(23)') == GF(23)
raises(OptionError, lambda: Domain.preprocess('Z[]'))
assert Domain.preprocess('Z[x]') == ZZ[x]
assert Domain.preprocess('Q[x]') == QQ[x]
assert Domain.preprocess('ZZ[x]') == ZZ[x]
assert Domain.preprocess('QQ[x]') == QQ[x]
assert Domain.preprocess('Z[x,y]') == ZZ[x, y]
assert Domain.preprocess('Q[x,y]') == QQ[x, y]
assert Domain.preprocess('ZZ[x,y]') == ZZ[x, y]
assert Domain.preprocess('QQ[x,y]') == QQ[x, y]
raises(OptionError, lambda: Domain.preprocess('Z()'))
assert Domain.preprocess('Z(x)') == ZZ.frac_field(x)
assert Domain.preprocess('Q(x)') == QQ.frac_field(x)
assert Domain.preprocess('ZZ(x)') == ZZ.frac_field(x)
assert Domain.preprocess('QQ(x)') == QQ.frac_field(x)
assert Domain.preprocess('Z(x,y)') == ZZ.frac_field(x, y)
assert Domain.preprocess('Q(x,y)') == QQ.frac_field(x, y)
assert Domain.preprocess('ZZ(x,y)') == ZZ.frac_field(x, y)
assert Domain.preprocess('QQ(x,y)') == QQ.frac_field(x, y)
assert Domain.preprocess('Q<I>') == QQ.algebraic_field(I)
assert Domain.preprocess('QQ<I>') == QQ.algebraic_field(I)
assert Domain.preprocess('Q<sqrt(2), I>') == QQ.algebraic_field(sqrt(2), I)
assert Domain.preprocess(
'QQ<sqrt(2), I>') == QQ.algebraic_field(sqrt(2), I)
raises(OptionError, lambda: Domain.preprocess('abc'))
def test_Domain_postprocess():
raises(GeneratorsError, lambda: Domain.postprocess({'gens': (x, y),
'domain': ZZ[y, z]}))
raises(GeneratorsError, lambda: Domain.postprocess({'gens': (),
'domain': EX}))
raises(GeneratorsError, lambda: Domain.postprocess({'domain': EX}))
def test_Split_preprocess():
assert Split.preprocess(False) is False
assert Split.preprocess(True) is True
assert Split.preprocess(0) is False
assert Split.preprocess(1) is True
raises(OptionError, lambda: Split.preprocess(x))
def test_Split_postprocess():
raises(NotImplementedError, lambda: Split.postprocess({'split': True}))
def test_Gaussian_preprocess():
assert Gaussian.preprocess(False) is False
assert Gaussian.preprocess(True) is True
assert Gaussian.preprocess(0) is False
assert Gaussian.preprocess(1) is True
raises(OptionError, lambda: Gaussian.preprocess(x))
def test_Gaussian_postprocess():
opt = {'gaussian': True}
Gaussian.postprocess(opt)
assert opt == {
'gaussian': True,
'extension': set([I]),
'domain': QQ.algebraic_field(I),
}
def test_Extension_preprocess():
assert Extension.preprocess(True) is True
assert Extension.preprocess(1) is True
assert Extension.preprocess([]) is None
assert Extension.preprocess(sqrt(2)) == set([sqrt(2)])
assert Extension.preprocess([sqrt(2)]) == set([sqrt(2)])
assert Extension.preprocess([sqrt(2), I]) == set([sqrt(2), I])
raises(OptionError, lambda: Extension.preprocess(False))
raises(OptionError, lambda: Extension.preprocess(0))
def test_Extension_postprocess():
opt = {'extension': set([sqrt(2)])}
Extension.postprocess(opt)
assert opt == {
'extension': set([sqrt(2)]),
'domain': QQ.algebraic_field(sqrt(2)),
}
opt = {'extension': True}
Extension.postprocess(opt)
assert opt == {'extension': True}
def test_Modulus_preprocess():
assert Modulus.preprocess(23) == 23
assert Modulus.preprocess(Integer(23)) == 23
raises(OptionError, lambda: Modulus.preprocess(0))
raises(OptionError, lambda: Modulus.preprocess(x))
def test_Modulus_postprocess():
opt = {'modulus': 5}
Modulus.postprocess(opt)
assert opt == {
'modulus': 5,
'domain': FF(5),
}
opt = {'modulus': 5, 'symmetric': False}
Modulus.postprocess(opt)
assert opt == {
'modulus': 5,
'domain': FF(5, False),
'symmetric': False,
}
def test_Symmetric_preprocess():
assert Symmetric.preprocess(False) is False
assert Symmetric.preprocess(True) is True
assert Symmetric.preprocess(0) is False
assert Symmetric.preprocess(1) is True
raises(OptionError, lambda: Symmetric.preprocess(x))
def test_Symmetric_postprocess():
opt = {'symmetric': True}
Symmetric.postprocess(opt)
assert opt == {'symmetric': True}
def test_Strict_preprocess():
assert Strict.preprocess(False) is False
assert Strict.preprocess(True) is True
assert Strict.preprocess(0) is False
assert Strict.preprocess(1) is True
raises(OptionError, lambda: Strict.preprocess(x))
def test_Strict_postprocess():
opt = {'strict': True}
Strict.postprocess(opt)
assert opt == {'strict': True}
def test_Auto_preprocess():
assert Auto.preprocess(False) is False
assert Auto.preprocess(True) is True
assert Auto.preprocess(0) is False
assert Auto.preprocess(1) is True
raises(OptionError, lambda: Auto.preprocess(x))
def test_Auto_postprocess():
opt = {'auto': True}
Auto.postprocess(opt)
assert opt == {'auto': True}
def test_Frac_preprocess():
assert Frac.preprocess(False) is False
assert Frac.preprocess(True) is True
assert Frac.preprocess(0) is False
assert Frac.preprocess(1) is True
raises(OptionError, lambda: Frac.preprocess(x))
def test_Frac_postprocess():
opt = {'frac': True}
Frac.postprocess(opt)
assert opt == {'frac': True}
def test_Formal_preprocess():
assert Formal.preprocess(False) is False
assert Formal.preprocess(True) is True
assert Formal.preprocess(0) is False
assert Formal.preprocess(1) is True
raises(OptionError, lambda: Formal.preprocess(x))
def test_Formal_postprocess():
opt = {'formal': True}
Formal.postprocess(opt)
assert opt == {'formal': True}
def test_Polys_preprocess():
assert Polys.preprocess(False) is False
assert Polys.preprocess(True) is True
assert Polys.preprocess(0) is False
assert Polys.preprocess(1) is True
raises(OptionError, lambda: Polys.preprocess(x))
def test_Polys_postprocess():
opt = {'polys': True}
Polys.postprocess(opt)
assert opt == {'polys': True}
def test_Include_preprocess():
assert Include.preprocess(False) is False
assert Include.preprocess(True) is True
assert Include.preprocess(0) is False
assert Include.preprocess(1) is True
raises(OptionError, lambda: Include.preprocess(x))
def test_Include_postprocess():
opt = {'include': True}
Include.postprocess(opt)
assert opt == {'include': True}
def test_All_preprocess():
assert All.preprocess(False) is False
assert All.preprocess(True) is True
assert All.preprocess(0) is False
assert All.preprocess(1) is True
raises(OptionError, lambda: All.preprocess(x))
def test_All_postprocess():
opt = {'all': True}
All.postprocess(opt)
assert opt == {'all': True}
def test_Gen_postprocess():
opt = {'gen': x}
Gen.postprocess(opt)
assert opt == {'gen': x}
def test_Symbols_preprocess():
raises(OptionError, lambda: Symbols.preprocess(x))
def test_Symbols_postprocess():
opt = {'symbols': [x, y, z]}
Symbols.postprocess(opt)
assert opt == {'symbols': [x, y, z]}
def test_Method_preprocess():
raises(OptionError, lambda: Method.preprocess(10))
def test_Method_postprocess():
opt = {'method': 'f5b'}
Method.postprocess(opt)
assert opt == {'method': 'f5b'}
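# --- Editor's note: hypothetical, illustrative helper; not part of the upstream
# test suite. It ties the pieces above together: Options() runs each option's
# preprocess() hook on construction, and clone() returns an updated copy without
# mutating the original (the same behaviour exercised in test_Options_clone).
def _demo_options_usage():
    opt = Options((x, y, z), {'domain': 'ZZ'})
    new_opt = opt.clone({'gens': (x, y), 'order': 'lex'})
    assert (opt.gens, opt.domain, 'order' in opt) == ((x, y, z), ZZ, False)
    assert (new_opt.gens, new_opt.domain, 'order' in new_opt) == ((x, y), ZZ, True)
    return new_opt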
|
bsd-3-clause
|
4eek/edx-platform
|
common/djangoapps/student/migrations/0002_text_to_varchar_and_indexes.py
|
188
|
9581
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'UserProfile.name'
db.alter_column('auth_userprofile', 'name', self.gf('django.db.models.fields.CharField')(max_length=255))
# Adding index on 'UserProfile', fields ['name']
db.create_index('auth_userprofile', ['name'])
# Changing field 'UserProfile.language'
db.alter_column('auth_userprofile', 'language', self.gf('django.db.models.fields.CharField')(max_length=255))
# Adding index on 'UserProfile', fields ['language']
db.create_index('auth_userprofile', ['language'])
# Changing field 'UserProfile.courseware'
db.alter_column('auth_userprofile', 'courseware', self.gf('django.db.models.fields.CharField')(max_length=255))
# Changing field 'UserProfile.meta'
db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.CharField')(max_length=255))
# Changing field 'UserProfile.location'
db.alter_column('auth_userprofile', 'location', self.gf('django.db.models.fields.CharField')(max_length=255))
# Adding index on 'UserProfile', fields ['location']
db.create_index('auth_userprofile', ['location'])
def backwards(self, orm):
# Removing index on 'UserProfile', fields ['location']
db.delete_index('auth_userprofile', ['location'])
# Removing index on 'UserProfile', fields ['language']
db.delete_index('auth_userprofile', ['language'])
# Removing index on 'UserProfile', fields ['name']
db.delete_index('auth_userprofile', ['name'])
# Changing field 'UserProfile.name'
db.alter_column('auth_userprofile', 'name', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.language'
db.alter_column('auth_userprofile', 'language', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.courseware'
db.alter_column('auth_userprofile', 'courseware', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.meta'
db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.location'
db.alter_column('auth_userprofile', 'location', self.gf('django.db.models.fields.TextField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'meta': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['student']
|
agpl-3.0
|
Kamik423/uni_plan
|
plan/plan/lib64/python3.4/site-packages/setuptools/dist.py
|
4
|
39660
|
__all__ = ['Distribution']
import re
import os
import warnings
import numbers
import distutils.log
import distutils.core
import distutils.cmd
import distutils.dist
import itertools
from collections import defaultdict
from distutils.errors import (
DistutilsOptionError, DistutilsPlatformError, DistutilsSetupError,
)
from distutils.util import rfc822_escape
from setuptools.extern import six
from setuptools.extern.six.moves import map
from pkg_resources.extern import packaging
from setuptools.depends import Require
from setuptools import windows_support
from setuptools.monkey import get_unpatched
from setuptools.config import parse_configuration
import pkg_resources
from .py36compat import Distribution_parse_config_files
__import__('pkg_resources.extern.packaging.specifiers')
__import__('pkg_resources.extern.packaging.version')
def _get_unpatched(cls):
warnings.warn("Do not call this function", DeprecationWarning)
return get_unpatched(cls)
# Based on Python 3.5 version
def write_pkg_file(self, file):
"""Write the PKG-INFO format data to a file object.
"""
version = '1.0'
if (self.provides or self.requires or self.obsoletes or
self.classifiers or self.download_url):
version = '1.1'
# Setuptools specific for PEP 345
if hasattr(self, 'python_requires'):
version = '1.2'
file.write('Metadata-Version: %s\n' % version)
file.write('Name: %s\n' % self.get_name())
file.write('Version: %s\n' % self.get_version())
file.write('Summary: %s\n' % self.get_description())
file.write('Home-page: %s\n' % self.get_url())
file.write('Author: %s\n' % self.get_contact())
file.write('Author-email: %s\n' % self.get_contact_email())
file.write('License: %s\n' % self.get_license())
if self.download_url:
file.write('Download-URL: %s\n' % self.download_url)
long_desc = rfc822_escape(self.get_long_description())
file.write('Description: %s\n' % long_desc)
keywords = ','.join(self.get_keywords())
if keywords:
file.write('Keywords: %s\n' % keywords)
self._write_list(file, 'Platform', self.get_platforms())
self._write_list(file, 'Classifier', self.get_classifiers())
# PEP 314
self._write_list(file, 'Requires', self.get_requires())
self._write_list(file, 'Provides', self.get_provides())
self._write_list(file, 'Obsoletes', self.get_obsoletes())
# Setuptools specific for PEP 345
if hasattr(self, 'python_requires'):
file.write('Requires-Python: %s\n' % self.python_requires)
# from Python 3.4
def write_pkg_info(self, base_dir):
"""Write the PKG-INFO file into the release tree.
"""
with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
encoding='UTF-8') as pkg_info:
self.write_pkg_file(pkg_info)
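# Editor's note: illustration only. For a hypothetical project that sets
# python_requires, write_pkg_file() above would emit a PKG-INFO starting roughly:
#   Metadata-Version: 1.2
#   Name: example-pkg
#   Version: 0.1
#   Summary: An example package
#   ...
#   Requires-Python: >=2.7
# Metadata-Version is bumped to 1.2 here only because python_requires is present.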
sequence = tuple, list
def check_importable(dist, attr, value):
try:
ep = pkg_resources.EntryPoint.parse('x=' + value)
assert not ep.extras
except (TypeError, ValueError, AttributeError, AssertionError):
raise DistutilsSetupError(
"%r must be importable 'module:attrs' string (got %r)"
% (attr, value)
)
def assert_string_list(dist, attr, value):
"""Verify that value is a string list or None"""
try:
assert ''.join(value) != value
except (TypeError, ValueError, AttributeError, AssertionError):
raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr, value)
)
def check_nsp(dist, attr, value):
"""Verify that namespace packages are valid"""
ns_packages = value
assert_string_list(dist, attr, ns_packages)
for nsp in ns_packages:
if not dist.has_contents_for(nsp):
raise DistutilsSetupError(
"Distribution contains no modules or packages for " +
"namespace package %r" % nsp
)
parent, sep, child = nsp.rpartition('.')
if parent and parent not in ns_packages:
distutils.log.warn(
"WARNING: %r is declared as a package namespace, but %r"
" is not: please correct this in setup.py", nsp, parent
)
def check_extras(dist, attr, value):
"""Verify that extras_require mapping is valid"""
try:
list(itertools.starmap(_check_extra, value.items()))
except (TypeError, ValueError, AttributeError):
raise DistutilsSetupError(
"'extras_require' must be a dictionary whose values are "
"strings or lists of strings containing valid project/version "
"requirement specifiers."
)
def _check_extra(extra, reqs):
name, sep, marker = extra.partition(':')
if marker and pkg_resources.invalid_marker(marker):
raise DistutilsSetupError("Invalid environment marker: " + marker)
list(pkg_resources.parse_requirements(reqs))
def assert_bool(dist, attr, value):
"""Verify that value is True, False, 0, or 1"""
if bool(value) != value:
tmpl = "{attr!r} must be a boolean value (got {value!r})"
raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
def check_requirements(dist, attr, value):
"""Verify that install_requires is a valid requirements list"""
try:
list(pkg_resources.parse_requirements(value))
except (TypeError, ValueError) as error:
tmpl = (
"{attr!r} must be a string or list of strings "
"containing valid project/version requirement specifiers; {error}"
)
raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
def check_specifier(dist, attr, value):
"""Verify that value is a valid version specifier"""
try:
packaging.specifiers.SpecifierSet(value)
except packaging.specifiers.InvalidSpecifier as error:
tmpl = (
"{attr!r} must be a string "
"containing valid version specifiers; {error}"
)
raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
def check_entry_points(dist, attr, value):
"""Verify that entry_points map is parseable"""
try:
pkg_resources.EntryPoint.parse_map(value)
except ValueError as e:
raise DistutilsSetupError(e)
def check_test_suite(dist, attr, value):
if not isinstance(value, six.string_types):
raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
"""Verify that value is a dictionary of package names to glob lists"""
if isinstance(value, dict):
for k, v in value.items():
if not isinstance(k, str):
break
try:
iter(v)
except TypeError:
break
else:
return
raise DistutilsSetupError(
attr + " must be a dictionary mapping package names to lists of "
"wildcard patterns"
)
def check_packages(dist, attr, value):
for pkgname in value:
if not re.match(r'\w+(\.\w+)*', pkgname):
distutils.log.warn(
"WARNING: %r not a valid package name; please use only "
".-separated package names in setup.py", pkgname
)
_Distribution = get_unpatched(distutils.core.Distribution)
class Distribution(Distribution_parse_config_files, _Distribution):
"""Distribution with support for features, tests, and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'features' **deprecated** -- a dictionary mapping option names to
'setuptools.Feature'
objects. Features are a portion of the distribution that can be
included or excluded based on user options, inter-feature dependencies,
and availability on the current system. Excluded features are omitted
from all setup commands, including source and binary distributions, so
you can create multiple distributions from the same source tree.
Feature names should be valid Python identifiers, except that they may
contain the '-' (minus) sign. Features can be included or excluded
via the command line options '--with-X' and '--without-X', where 'X' is
the name of the feature. Whether a feature is included by default, and
whether you are allowed to control this from the command line, is
determined by the Feature object. See the 'Feature' class for more
information.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution. They are used by the feature subsystem to configure the
distribution for the included and excluded features.
"""
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
# Fake up a replacement for the data that would normally come from
# PKG-INFO, but which might not yet be built if this is a fresh
# checkout.
#
if not attrs or 'name' not in attrs or 'version' not in attrs:
return
key = pkg_resources.safe_name(str(attrs['name'])).lower()
dist = pkg_resources.working_set.by_key.get(key)
if dist is not None and not dist.has_metadata('PKG-INFO'):
dist._version = pkg_resources.safe_version(str(attrs['version']))
self._patched_dist = dist
def __init__(self, attrs=None):
have_package_data = hasattr(self, "package_data")
if not have_package_data:
self.package_data = {}
_attrs_dict = attrs or {}
if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
Feature.warn_deprecated()
self.require_features = []
self.features = {}
self.dist_files = []
self.src_root = attrs and attrs.pop("src_root", None)
self.patch_missing_pkg_info(attrs)
# Make sure we have any eggs needed to interpret 'attrs'
if attrs is not None:
self.dependency_links = attrs.pop('dependency_links', [])
assert_string_list(self, 'dependency_links', self.dependency_links)
if attrs and 'setup_requires' in attrs:
self.fetch_build_eggs(attrs['setup_requires'])
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
vars(self).setdefault(ep.name, None)
_Distribution.__init__(self, attrs)
if isinstance(self.metadata.version, numbers.Number):
# Some people apparently take "version number" too literally :)
self.metadata.version = str(self.metadata.version)
if self.metadata.version is not None:
try:
ver = packaging.version.Version(self.metadata.version)
normalized_version = str(ver)
if self.metadata.version != normalized_version:
warnings.warn(
"Normalizing '%s' to '%s'" % (
self.metadata.version,
normalized_version,
)
)
self.metadata.version = normalized_version
except (packaging.version.InvalidVersion, TypeError):
warnings.warn(
"The version specified (%r) is an invalid version, this "
"may not work as expected with newer versions of "
"setuptools, pip, and PyPI. Please see PEP 440 for more "
"details." % self.metadata.version
)
if getattr(self, 'python_requires', None):
self.metadata.python_requires = self.python_requires
self._finalize_requires()
def _finalize_requires(self):
"""
Fix environment markers in `install_requires` and `extras_require`.
- move requirements in `install_requires` that are using environment
markers or extras to `extras_require`.
- convert requirements in `extras_require` of the form
`"extra": ["barbazquux; {marker}"]` to
`"extra:{marker}": ["barbazquux"]`.
"""
extras_require = defaultdict(list)
for k, v in (
getattr(self, 'extras_require', None) or {}
).items():
for r in pkg_resources.parse_requirements(v):
marker = r.marker
if marker:
r.marker = None
extras_require[k + ':' + str(marker)].append(r)
else:
extras_require[k].append(r)
install_requires = []
for r in pkg_resources.parse_requirements(
getattr(self, 'install_requires', None) or ()
):
marker = r.marker
extras = r.extras
if not marker and not extras:
install_requires.append(r)
continue
r.extras = ()
r.marker = None
for e in extras or ('',):
section = e
if marker:
section += ':' + str(marker)
extras_require[section].append(r)
self.extras_require = dict(
(k, [str(r) for r in v])
for k, v in extras_require.items()
)
self.install_requires = [str(r) for r in install_requires]
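    # Editor's note (illustration only, not executed): with hypothetical inputs
    #   install_requires=['six', 'pywin32; sys_platform == "win32"']
    #   extras_require={'docs': ['sphinx; python_version >= "3.0"']}
    # the loops above rewrite them to
    #   install_requires=['six']
    #   extras_require={':sys_platform == "win32"': ['pywin32'],
    #                   'docs:python_version >= "3.0"': ['sphinx']}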
def parse_config_files(self, filenames=None):
"""Parses configuration files from various levels
and loads configuration.
"""
_Distribution.parse_config_files(self, filenames=filenames)
parse_configuration(self, self.command_options)
if getattr(self, 'python_requires', None):
self.metadata.python_requires = self.python_requires
def parse_command_line(self):
"""Process features after parsing command line options"""
result = _Distribution.parse_command_line(self)
if self.features:
self._finalize_features()
return result
def _feature_attrname(self, name):
"""Convert feature name to corresponding option attribute name"""
return 'with_' + name.replace('-', '_')
def fetch_build_eggs(self, requires):
"""Resolve pre-setup requirements"""
resolved_dists = pkg_resources.working_set.resolve(
pkg_resources.parse_requirements(requires),
installer=self.fetch_build_egg,
replace_conflicting=True,
)
for dist in resolved_dists:
pkg_resources.working_set.add(dist, replace=True)
return resolved_dists
def finalize_options(self):
_Distribution.finalize_options(self)
if self.features:
self._set_global_opts_from_features()
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
value = getattr(self, ep.name, None)
if value is not None:
ep.require(installer=self.fetch_build_egg)
ep.load()(self, ep.name, value)
if getattr(self, 'convert_2to3_doctests', None):
# XXX may convert to set here when we can rely on set being builtin
self.convert_2to3_doctests = [
os.path.abspath(p)
for p in self.convert_2to3_doctests
]
else:
self.convert_2to3_doctests = []
def get_egg_cache_dir(self):
egg_cache_dir = os.path.join(os.curdir, '.eggs')
if not os.path.exists(egg_cache_dir):
os.mkdir(egg_cache_dir)
windows_support.hide_file(egg_cache_dir)
readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
with open(readme_txt_filename, 'w') as f:
f.write('This directory contains eggs that were downloaded '
'by setuptools to build, test, and run plug-ins.\n\n')
f.write('This directory caches those eggs to prevent '
'repeated downloads.\n\n')
f.write('However, it is safe to delete this directory.\n\n')
return egg_cache_dir
def fetch_build_egg(self, req):
"""Fetch an egg needed for building"""
try:
cmd = self._egg_fetcher
cmd.package_index.to_scan = []
except AttributeError:
from setuptools.command.easy_install import easy_install
dist = self.__class__({'script_args': ['easy_install']})
dist.parse_config_files()
opts = dist.get_option_dict('easy_install')
keep = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts'
)
for key in list(opts):
if key not in keep:
del opts[key] # don't use any other settings
if self.dependency_links:
links = self.dependency_links[:]
if 'find_links' in opts:
links = opts['find_links'][1].split() + links
opts['find_links'] = ('setup', links)
install_dir = self.get_egg_cache_dir()
cmd = easy_install(
dist, args=["x"], install_dir=install_dir,
exclude_scripts=True,
always_copy=False, build_directory=None, editable=False,
upgrade=False, multi_version=True, no_report=True, user=False
)
cmd.ensure_finalized()
self._egg_fetcher = cmd
return cmd.easy_install(req)
def _set_global_opts_from_features(self):
"""Add --with-X/--without-X options based on optional features"""
go = []
no = self.negative_opt.copy()
for name, feature in self.features.items():
self._set_feature(name, None)
feature.validate(self)
if feature.optional:
descr = feature.description
incdef = ' (default)'
excdef = ''
if not feature.include_by_default():
excdef, incdef = incdef, excdef
new = (
('with-' + name, None, 'include ' + descr + incdef),
('without-' + name, None, 'exclude ' + descr + excdef),
)
go.extend(new)
no['without-' + name] = 'with-' + name
self.global_options = self.feature_options = go + self.global_options
self.negative_opt = self.feature_negopt = no
def _finalize_features(self):
"""Add/remove features and resolve dependencies between them"""
# First, flag all the enabled items (and thus their dependencies)
for name, feature in self.features.items():
enabled = self.feature_is_included(name)
if enabled or (enabled is None and feature.include_by_default()):
feature.include_in(self)
self._set_feature(name, 1)
# Then disable the rest, so that off-by-default features don't
# get flagged as errors when they're required by an enabled feature
for name, feature in self.features.items():
if not self.feature_is_included(name):
feature.exclude_from(self)
self._set_feature(name, 0)
def get_command_class(self, command):
"""Pluggable version of get_command_class()"""
if command in self.cmdclass:
return self.cmdclass[command]
eps = pkg_resources.iter_entry_points('distutils.commands', command)
for ep in eps:
ep.require(installer=self.fetch_build_egg)
self.cmdclass[command] = cmdclass = ep.load()
return cmdclass
else:
return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def get_command_list(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.get_command_list(self)
def _set_feature(self, name, status):
"""Set feature's inclusion status"""
setattr(self, self._feature_attrname(name), status)
def feature_is_included(self, name):
"""Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
return getattr(self, self._feature_attrname(name))
def include_feature(self, name):
"""Request inclusion of feature named 'name'"""
if self.feature_is_included(name) == 0:
descr = self.features[name].description
raise DistutilsOptionError(
descr + " is required, but was excluded or is not available"
)
self.features[name].include_in(self)
self._set_feature(name, 1)
def include(self, **attrs):
"""Add items to distribution that are named in keyword arguments
For example, 'dist.include(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k, v in attrs.items():
include = getattr(self, '_include_' + k, None)
if include:
include(v)
else:
self._include_misc(k, v)
def exclude_package(self, package):
"""Remove packages, modules, and extensions in named package"""
pfx = package + '.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self, package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package + '.'
for p in self.iter_distribution_names():
if p == package or p.startswith(pfx):
return True
def _exclude_misc(self, name, value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is not None and not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self, name, [item for item in old if item not in value])
def _include_misc(self, name, value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is None:
setattr(self, name, value)
elif not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
else:
new = [item for item in value if item not in old]
setattr(self, name, old + new)
def exclude(self, **attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k, v in attrs.items():
exclude = getattr(self, '_exclude_' + k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k, v)
def _exclude_packages(self, packages):
if not isinstance(packages, sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
def _parse_command_opts(self, parser, args):
# Remove --with-X/--without-X options when processing command args
self.global_options = self.__class__.global_options
self.negative_opt = self.__class__.negative_opt
# First, expand any aliases
command = args[0]
aliases = self.get_option_dict('aliases')
while command in aliases:
src, alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
import shlex
args[:1] = shlex.split(alias, True)
command = args[0]
nargs = _Distribution._parse_command_opts(self, parser, args)
# Handle commands that want to consume all remaining arguments
cmd_class = self.get_command_class(command)
if getattr(cmd_class, 'command_consumes_arguments', None):
self.get_option_dict(command)['args'] = ("command line", nargs)
if nargs is not None:
return []
return nargs
def get_cmdline_options(self):
"""Return a '{cmd: {opt:val}}' map of all command-line options
Option names are all long, but do not include the leading '--', and
contain dashes rather than underscores. If the option doesn't take
an argument (e.g. '--quiet'), the 'val' is 'None'.
Note that options provided by config files are intentionally excluded.
"""
d = {}
for cmd, opts in self.command_options.items():
for opt, (src, val) in opts.items():
if src != "command line":
continue
opt = opt.replace('_', '-')
if val == 0:
cmdobj = self.get_command_obj(cmd)
neg_opt = self.negative_opt.copy()
neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
for neg, pos in neg_opt.items():
if pos == opt:
opt = neg
val = None
break
else:
raise AssertionError("Shouldn't be able to get here")
elif val == 1:
val = None
d.setdefault(cmd, {})[opt] = val
return d
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext, tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
def handle_display_options(self, option_order):
"""If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
"""
import sys
if six.PY2 or self.help_commands:
return _Distribution.handle_display_options(self, option_order)
# Stdout may be StringIO (e.g. in tests)
import io
if not isinstance(sys.stdout, io.TextIOWrapper):
return _Distribution.handle_display_options(self, option_order)
# Don't wrap stdout if utf-8 is already the encoding. Provides
# workaround for #334.
if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
return _Distribution.handle_display_options(self, option_order)
# Print metadata in UTF-8 no matter the platform
encoding = sys.stdout.encoding
errors = sys.stdout.errors
newline = sys.platform != 'win32' and '\n' or None
line_buffering = sys.stdout.line_buffering
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
try:
return _Distribution.handle_display_options(self, option_order)
finally:
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), encoding, errors, newline, line_buffering)
class Feature:
"""
**deprecated** -- The `Feature` facility was never completely implemented
or supported, `has reported issues
<https://github.com/pypa/setuptools/issues/58>`_ and will be removed in
a future version.
A subset of the distribution that can be excluded if unneeded/wanted
Features are created using these keyword arguments:
'description' -- a short, human readable description of the feature, to
be used in error messages, and option help messages.
'standard' -- if true, the feature is included by default if it is
available on the current system. Otherwise, the feature is only
included if requested via a command line '--with-X' option, or if
another included feature requires it. The default setting is 'False'.
'available' -- if true, the feature is available for installation on the
current system. The default setting is 'True'.
'optional' -- if true, the feature's inclusion can be controlled from the
command line, using the '--with-X' or '--without-X' options. If
false, the feature's inclusion status is determined automatically,
based on 'available', 'standard', and whether any other feature
requires it. The default setting is 'True'.
'require_features' -- a string or sequence of strings naming features
that should also be included if this feature is included. Defaults to
empty list. May also contain 'Require' objects that should be
added/removed from the distribution.
'remove' -- a string or list of strings naming packages to be removed
from the distribution if this feature is *not* included. If the
feature *is* included, this argument is ignored. This argument exists
to support removing features that "crosscut" a distribution, such as
defining a 'tests' feature that removes all the 'tests' subpackages
provided by other features. The default for this argument is an empty
list. (Note: the named package(s) or modules must exist in the base
distribution when the 'setup()' function is initially called.)
other keywords -- any other keyword arguments are saved, and passed to
the distribution's 'include()' and 'exclude()' methods when the
feature is included or excluded, respectively. So, for example, you
could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
added or removed from the distribution as appropriate.
A feature must include at least one 'requires', 'remove', or other
keyword argument. Otherwise, it can't affect the distribution in any way.
Note also that you can subclass 'Feature' to create your own specialized
feature types that modify the distribution in other ways when included or
excluded. See the docstrings for the various methods here for more detail.
Aside from the methods, the only feature attributes that distributions look
at are 'description' and 'optional'.
"""
@staticmethod
def warn_deprecated():
msg = (
"Features are deprecated and will be removed in a future "
"version. See https://github.com/pypa/setuptools/issues/65."
)
warnings.warn(msg, DeprecationWarning, stacklevel=3)
def __init__(
self, description, standard=False, available=True,
optional=True, require_features=(), remove=(), **extras):
self.warn_deprecated()
self.description = description
self.standard = standard
self.available = available
self.optional = optional
if isinstance(require_features, (str, Require)):
require_features = require_features,
self.require_features = [
r for r in require_features if isinstance(r, str)
]
er = [r for r in require_features if not isinstance(r, str)]
if er:
extras['require_features'] = er
if isinstance(remove, str):
remove = remove,
self.remove = remove
self.extras = extras
if not remove and not require_features and not extras:
raise DistutilsSetupError(
"Feature %s: must define 'require_features', 'remove', or "
"at least one of 'packages', 'py_modules', etc."
)
def include_by_default(self):
"""Should this feature be included by default?"""
return self.available and self.standard
def include_in(self, dist):
"""Ensure feature and its requirements are included in distribution
You may override this in a subclass to perform additional operations on
the distribution. Note that this method may be called more than once
per feature, and so should be idempotent.
"""
if not self.available:
raise DistutilsPlatformError(
self.description + " is required, "
"but is not available on this platform"
)
dist.include(**self.extras)
for f in self.require_features:
dist.include_feature(f)
def exclude_from(self, dist):
"""Ensure feature is excluded from distribution
You may override this in a subclass to perform additional operations on
the distribution. This method will be called at most once per
feature, and only after all included features have been asked to
include themselves.
"""
dist.exclude(**self.extras)
if self.remove:
for item in self.remove:
dist.exclude_package(item)
def validate(self, dist):
"""Verify that feature makes sense in context of distribution
This method is called by the distribution just before it parses its
command line. It checks to ensure that the 'remove' attribute, if any,
contains only valid package/module names that are present in the base
distribution when 'setup()' is called. You may override it in a
subclass to perform any other required validation of the feature
against a target distribution.
"""
for item in self.remove:
if not dist.has_contents_for(item):
raise DistutilsSetupError(
"%s wants to be able to remove %s, but the distribution"
" doesn't contain any packages or modules under %s"
% (self.description, item, item)
)
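# --- Editor's note: hypothetical setup.py sketch (illustration only; the project
# name and requirement strings are made up). It exercises the keywords documented
# on the Distribution class above.
#
#   from setuptools import setup
#   setup(
#       name='example-pkg',
#       version='0.1',
#       packages=['example_pkg'],
#       install_requires=['requests>=2.0'],
#       extras_require={'tests': ['mock; python_version < "3.0"']},
#       package_data={'example_pkg': ['data/*.json']},
#       python_requires='>=2.7',
#   )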
|
apache-2.0
|
40223144/w16b_test
|
static/Brython3.1.3-20150514-095342/Lib/_markupbase.py
|
891
|
14598
|
"""Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the html.parser module. It has no
documented public API and should not be used directly.
"""
import re
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
del re
class ParserBase:
"""Parser base class which provides some common support methods used
by the SGML/HTML and XHTML parsers."""
def __init__(self):
if self.__class__ is ParserBase:
raise RuntimeError(
"_markupbase.ParserBase must be subclassed")
def error(self, message):
raise NotImplementedError(
"subclasses of ParserBase must override error()")
def reset(self):
self.lineno = 1
self.offset = 0
def getpos(self):
"""Return current line number and offset."""
return self.lineno, self.offset
# Internal -- update line number and offset. This should be
# called for each piece of data exactly once, in order -- in other
# words the concatenation of all the input strings to this
# function should be exactly the entire input.
def updatepos(self, i, j):
if i >= j:
return j
rawdata = self.rawdata
nlines = rawdata.count("\n", i, j)
if nlines:
self.lineno = self.lineno + nlines
pos = rawdata.rindex("\n", i, j) # Should not fail
self.offset = j-(pos+1)
else:
self.offset = self.offset + j-i
return j
_decl_otherchars = ''
# Internal -- parse declaration (for use by subclasses).
def parse_declaration(self, i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
# ISO 8879:1986, however, has more complex
# declaration syntax for elements in <!...>, including:
# --comment--
# [marked section]
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
# ATTLIST, NOTATION, SHORTREF, USEMAP,
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
rawdata = self.rawdata
j = i + 2
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
if rawdata[j:j+1] == ">":
# the empty comment <!>
return j + 1
if rawdata[j:j+1] in ("-", ""):
# Start of comment followed by buffer boundary,
# or just a buffer boundary.
return -1
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
n = len(rawdata)
if rawdata[j:j+2] == '--': #comment
# Locate --.*-- as the body of the comment
return self.parse_comment(i)
elif rawdata[j] == '[': #marked section
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
# Note that this is extended by Microsoft Office "Save as Web" function
# to include [if...] and [endif].
return self.parse_marked_section(i)
else: #all other declaration elements
decltype, j = self._scan_name(j, i)
if j < 0:
return j
if decltype == "doctype":
self._decl_otherchars = ''
while j < n:
c = rawdata[j]
if c == ">":
# end of declaration syntax
data = rawdata[i+2:j]
if decltype == "doctype":
self.handle_decl(data)
else:
# According to the HTML5 specs sections "8.2.4.44 Bogus
# comment state" and "8.2.4.45 Markup declaration open
# state", a comment token should be emitted.
# Calling unknown_decl provides more flexibility though.
self.unknown_decl(data)
return j + 1
if c in "\"'":
m = _declstringlit_match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
name, j = self._scan_name(j, i)
elif c in self._decl_otherchars:
j = j + 1
elif c == "[":
# this could be handled in a separate doctype parser
if decltype == "doctype":
j = self._parse_doctype_subset(j + 1, i)
elif decltype in {"attlist", "linktype", "link", "element"}:
# must tolerate []'d groups in a content model in an element declaration
# also in data attribute specifications of attlist declaration
# also link type declaration subsets in linktype declarations
# also link attribute specification lists in link declarations
self.error("unsupported '[' char in %s declaration" % decltype)
else:
self.error("unexpected '[' char in declaration")
else:
self.error(
"unexpected %r char in declaration" % rawdata[j])
if j < 0:
return j
return -1 # incomplete
# Internal -- parse a marked section
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
def parse_marked_section(self, i, report=1):
rawdata= self.rawdata
assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
sectName, j = self._scan_name( i+3, i )
if j < 0:
return j
if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
# look for standard ]]> ending
match= _markedsectionclose.search(rawdata, i+3)
elif sectName in {"if", "else", "endif"}:
# look for MS Office ]> ending
match= _msmarkedsectionclose.search(rawdata, i+3)
else:
self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
if not match:
return -1
if report:
j = match.start(0)
self.unknown_decl(rawdata[i+3: j])
return match.end(0)
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, report=1):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
self.error('unexpected call to parse_comment()')
match = _commentclose.search(rawdata, i+4)
if not match:
return -1
if report:
j = match.start(0)
self.handle_comment(rawdata[i+4: j])
return match.end(0)
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
# returning the index just past any whitespace following the trailing ']'.
def _parse_doctype_subset(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
j = i
while j < n:
c = rawdata[j]
if c == "<":
s = rawdata[j:j+2]
if s == "<":
# end of buffer; incomplete
return -1
if s != "<!":
self.updatepos(declstartpos, j + 1)
self.error("unexpected char in internal subset (in %r)" % s)
if (j + 2) == n:
# end of buffer; incomplete
return -1
if (j + 4) > n:
# end of buffer; incomplete
return -1
if rawdata[j:j+4] == "<!--":
j = self.parse_comment(j, report=0)
if j < 0:
return j
continue
name, j = self._scan_name(j + 2, declstartpos)
if j == -1:
return -1
if name not in {"attlist", "element", "entity", "notation"}:
self.updatepos(declstartpos, j + 2)
self.error(
"unknown declaration %r in internal subset" % name)
# handle the individual names
meth = getattr(self, "_parse_doctype_" + name)
j = meth(j, declstartpos)
if j < 0:
return j
elif c == "%":
# parameter entity reference
if (j + 1) == n:
# end of buffer; incomplete
return -1
s, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
if rawdata[j] == ";":
j = j + 1
elif c == "]":
j = j + 1
while j < n and rawdata[j].isspace():
j = j + 1
if j < n:
if rawdata[j] == ">":
return j
self.updatepos(declstartpos, j)
self.error("unexpected char after internal subset")
else:
return -1
elif c.isspace():
j = j + 1
else:
self.updatepos(declstartpos, j)
self.error("unexpected char %r in internal subset" % c)
# end of buffer reached
return -1
# Internal -- scan past <!ELEMENT declarations
def _parse_doctype_element(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j == -1:
return -1
# style content model; just skip until '>'
rawdata = self.rawdata
if '>' in rawdata[j:]:
return rawdata.find(">", j) + 1
return -1
# Internal -- scan past <!ATTLIST declarations
def _parse_doctype_attlist(self, i, declstartpos):
rawdata = self.rawdata
name, j = self._scan_name(i, declstartpos)
c = rawdata[j:j+1]
if c == "":
return -1
if c == ">":
return j + 1
while 1:
# scan a series of attribute descriptions; simplified:
# name type [value] [#constraint]
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if c == "":
return -1
if c == "(":
# an enumerated type; look for ')'
if ")" in rawdata[j:]:
j = rawdata.find(")", j) + 1
else:
return -1
while rawdata[j:j+1].isspace():
j = j + 1
if not rawdata[j:]:
# end of buffer, incomplete
return -1
else:
name, j = self._scan_name(j, declstartpos)
c = rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1
c = rawdata[j:j+1]
if not c:
return -1
if c == "#":
if rawdata[j:] == "#":
# end of buffer
return -1
name, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if not c:
return -1
if c == '>':
# all done
return j + 1
# Internal -- scan past <!NOTATION declarations
def _parse_doctype_notation(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j < 0:
return j
rawdata = self.rawdata
while 1:
c = rawdata[j:j+1]
if not c:
# end of buffer; incomplete
return -1
if c == '>':
return j + 1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if not m:
return -1
j = m.end()
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
# Internal -- scan past <!ENTITY declarations
def _parse_doctype_entity(self, i, declstartpos):
rawdata = self.rawdata
if rawdata[i:i+1] == "%":
j = i + 1
while 1:
c = rawdata[j:j+1]
if not c:
return -1
if c.isspace():
j = j + 1
else:
break
else:
j = i
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
while 1:
c = self.rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1 # incomplete
elif c == ">":
return j + 1
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
    # Internal -- scan a name token; return the new position and the token,
    # or (None, -1) if we've reached the end of the buffer.
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = _declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.updatepos(declstartpos, i)
self.error("expected name token at %r"
% rawdata[declstartpos:declstartpos+20])
# To be overridden -- handlers for unknown objects
def unknown_decl(self, data):
pass
|
agpl-3.0
|
Stadslab/SpiderBotMarlin
|
Marlin/create_speed_lookuptable.py
|
333
|
1382
|
#!/usr/bin/env python
""" Generate the stepper delay lookup table for Marlin firmware. """
import argparse
__author__ = "Ben Gamari <[email protected]>"
__copyright__ = "Copyright 2012, Ben Gamari"
__license__ = "GPL"
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-f', '--cpu-freq', type=int, default=16, help='CPU clockrate in MHz (default=16)')
parser.add_argument('-d', '--divider', type=int, default=8, help='Timer/counter pre-scale divider (default=8)')
args = parser.parse_args()
cpu_freq = args.cpu_freq * 1000000
timer_freq = cpu_freq / args.divider
print "#ifndef SPEED_LOOKUPTABLE_H"
print "#define SPEED_LOOKUPTABLE_H"
print
print '#include "Marlin.h"'
print
print "const uint16_t speed_lookuptable_fast[256][2] PROGMEM = {"
a = [ timer_freq / ((i*256)+(args.cpu_freq*2)) for i in range(256) ]
b = [ a[i] - a[i+1] for i in range(255) ]
b.append(b[-1])
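# Worked example with the defaults (16 MHz CPU, divider 8): timer_freq is
# 2000000, so a[i] = 2000000 / (i*256 + 32) (integer division under Python 2)
# and b[i] is the difference to the next entry, giving {delay, delta} pairs
# that the firmware can step between.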
for i in range(32):
print " ",
for j in range(8):
print "{%d, %d}," % (a[8*i+j], b[8*i+j]),
print
print "};"
print
print "const uint16_t speed_lookuptable_slow[256][2] PROGMEM = {"
a = [ timer_freq / ((i*8)+(args.cpu_freq*2)) for i in range(256) ]
b = [ a[i] - a[i+1] for i in range(255) ]
b.append(b[-1])
for i in range(32):
print " ",
for j in range(8):
print "{%d, %d}," % (a[8*i+j], b[8*i+j]),
print
print "};"
print
print "#endif"
|
gpl-2.0
|
weaselkeeper/ansible
|
lib/ansible/inventory/script.py
|
19
|
6511
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import subprocess
import sys
from collections import Mapping
from ansible.compat.six import iteritems
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_bytes_to_unicode
class InventoryScript:
''' Host inventory parser for ansible using external inventory scripts. '''
def __init__(self, loader, groups=None, filename=C.DEFAULT_HOST_LIST):
if groups is None:
groups = dict()
self._loader = loader
self.groups = groups
# Support inventory scripts that are not prefixed with some
# path information but happen to be in the current working
# directory when '.' is not in PATH.
self.filename = os.path.abspath(filename)
cmd = [ self.filename, "--list" ]
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(stdout, stderr) = sp.communicate()
if sp.returncode != 0:
raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
self.data = stdout
# see comment about _meta below
self.host_vars_from_top = None
self._parse(stderr)
def _parse(self, err):
all_hosts = {}
# not passing from_remote because data from CMDB is trusted
try:
self.raw = self._loader.load(self.data)
except Exception as e:
sys.stderr.write(err + "\n")
raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(self.filename, str(e)))
if not isinstance(self.raw, Mapping):
sys.stderr.write(err + "\n")
raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(self.filename))
self.raw = json_dict_bytes_to_unicode(self.raw)
group = None
for (group_name, data) in self.raw.items():
# in Ansible 1.3 and later, a "_meta" subelement may contain
# a variable "hostvars" which contains a hash for each host
# if this "hostvars" exists at all then do not call --host for each
# host. This is for efficiency and scripts should still return data
# if called with --host for backwards compat with 1.2 and earlier.
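            # Illustrative (hypothetical) script output handled by this loop:
            #   {
            #       "webservers": {"hosts": ["web1", "web2"], "vars": {"http_port": 80}},
            #       "dbservers": ["db1"],
            #       "_meta": {"hostvars": {"web1": {"ansible_host": "10.0.0.1"}}}
            #   }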
if group_name == '_meta':
if 'hostvars' in data:
self.host_vars_from_top = data['hostvars']
continue
if group_name not in self.groups:
group = self.groups[group_name] = Group(group_name)
group = self.groups[group_name]
host = None
if not isinstance(data, dict):
data = {'hosts': data}
            # if the dict has none of those subkeys, it is the simplified syntax: a host with vars
elif not any(k in data for k in ('hosts','vars')):
data = {'hosts': [group_name], 'vars': data}
if 'hosts' in data:
if not isinstance(data['hosts'], list):
raise AnsibleError("You defined a group \"%s\" with bad "
"data for the host list:\n %s" % (group_name, data))
for hostname in data['hosts']:
if not hostname in all_hosts:
all_hosts[hostname] = Host(hostname)
host = all_hosts[hostname]
group.add_host(host)
if 'vars' in data:
if not isinstance(data['vars'], dict):
raise AnsibleError("You defined a group \"%s\" with bad "
"data for variables:\n %s" % (group_name, data))
for k, v in iteritems(data['vars']):
group.set_variable(k, v)
# Separate loop to ensure all groups are defined
for (group_name, data) in self.raw.items():
if group_name == '_meta':
continue
if isinstance(data, dict) and 'children' in data:
for child_name in data['children']:
if child_name in self.groups:
self.groups[group_name].add_child_group(self.groups[child_name])
# Finally, add all top-level groups as children of 'all'.
# We exclude ungrouped here because it was already added as a child of
# 'all' at the time it was created.
for group in self.groups.values():
if group.depth == 0 and group.name not in ('all', 'ungrouped'):
self.groups['all'].add_child_group(group)
def get_host_variables(self, host):
""" Runs <script> --host <hostname> to determine additional host variables """
if self.host_vars_from_top is not None:
got = self.host_vars_from_top.get(host.name, {})
return got
cmd = [self.filename, "--host", host.name]
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(out, err) = sp.communicate()
if out.strip() == '':
return dict()
try:
return json_dict_bytes_to_unicode(self._loader.load(out))
except ValueError:
raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
|
gpl-3.0
|
aprefontaine/TMScheduler
|
tests/regressiontests/utils/module_loading.py
|
10
|
4553
|
import os
import sys
from unittest import TestCase
from zipimport import zipimporter
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
class DefaultLoader(TestCase):
def test_loader(self):
"Normal module existence can be tested"
test_module = import_module('regressiontests.utils.test_module')
# An importable child
self.assertTrue(module_has_submodule(test_module, 'good_module'))
mod = import_module('regressiontests.utils.test_module.good_module')
self.assertEqual(mod.content, 'Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(test_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'regressiontests.utils.test_module.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(test_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'regressiontests.utils.test_module.no_such_module')
class EggLoader(TestCase):
def setUp(self):
self.old_path = sys.path
self.egg_dir = '%s/eggs' % os.path.dirname(__file__)
def tearDown(self):
sys.path = self.old_path
sys.path_importer_cache.clear()
sys.modules.pop('egg_module.sub1.sub2.bad_module', None)
sys.modules.pop('egg_module.sub1.sub2.good_module', None)
sys.modules.pop('egg_module.sub1.sub2', None)
sys.modules.pop('egg_module.sub1', None)
sys.modules.pop('egg_module.bad_module', None)
sys.modules.pop('egg_module.good_module', None)
sys.modules.pop('egg_module', None)
def test_shallow_loader(self):
"Module existence can be tested inside eggs"
egg_name = '%s/test_egg.egg' % self.egg_dir
sys.path.append(egg_name)
egg_module = import_module('egg_module')
# An importable child
self.assertTrue(module_has_submodule(egg_module, 'good_module'))
mod = import_module('egg_module.good_module')
self.assertEqual(mod.content, 'Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(egg_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'egg_module.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(egg_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'egg_module.no_such_module')
def test_deep_loader(self):
"Modules deep inside an egg can still be tested for existence"
egg_name = '%s/test_egg.egg' % self.egg_dir
sys.path.append(egg_name)
egg_module = import_module('egg_module.sub1.sub2')
# An importable child
self.assertTrue(module_has_submodule(egg_module, 'good_module'))
mod = import_module('egg_module.sub1.sub2.good_module')
self.assertEqual(mod.content, 'Deep Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(egg_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(egg_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.no_such_module')
class TestFinder(object):
def __init__(self, *args, **kwargs):
self.importer = zipimporter(*args, **kwargs)
def find_module(self, path):
importer = self.importer.find_module(path)
if importer is None:
return
return TestLoader(importer)
class TestLoader(object):
def __init__(self, importer):
self.importer = importer
def load_module(self, name):
mod = self.importer.load_module(name)
mod.__loader__ = self
return mod
class CustomLoader(EggLoader):
"""The Custom Loader test is exactly the same as the EggLoader, but
it uses a custom defined Loader and Finder that is intentionally
split into two classes. Although the EggLoader combines both functions
into one class, this isn't required.
"""
def setUp(self):
super(CustomLoader, self).setUp()
sys.path_hooks.insert(0, TestFinder)
sys.path_importer_cache.clear()
def tearDown(self):
super(CustomLoader, self).tearDown()
sys.path_hooks.pop(0)
|
bsd-3-clause
|
shadmanj/college-code
|
BME301-Bioelectricity/hodgkin-huxley-solver-PYTHON/3rd-order-DE-solver.py
|
1
|
4453
|
# Shadman Jubaer
#BME 301
#These functions will solve any nth order differential equation using
#the 4th Order Runge-Kutta Method of numerically solving differential
#equations
import numpy as np
import matplotlib.pyplot as plt
#Generates interval over which to solve numerically
def irange(start, stop, step):
while start < stop:
yield round(start,4)
start += step
#Function to solve ODEs using 4th Order Runge-Kutta
#Inputs: list of u functions, x-interval, step sizes, initial values
#Outputs: Matrix with numerical solutions for each ode function
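#Reference update (standard 4th Order Runge-Kutta, step size h), which the
#loop below implements stage by stage:
#   k1 = f(x, u)
#   k2 = f(x + h/2, u + (h/2)*k1)
#   k3 = f(x + h/2, u + (h/2)*k2)
#   k4 = f(x + h,   u + h*k3)
#   u_next = u + (h/6)*(k1 + 2*k2 + 2*k3 + k4)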
def rk(odes, interval, step, initial_values):
h = step
h2 = h/2
u = initial_values
M = len(odes) #length of ODE array. Determines order of ODE
N = M - 1 #converts M for indexing
soln = [] #Main output array
for x in interval: #increment by points over the interval
        #Initialize k arrays for the loops
k1 = []
k2 = []
k3 = []
k4 = []
output = [] #stores solved u values
#Find k1 at x for all ODEs
#k1 = f(x,u)
i = 0
while i < M:
k1_new = odes[i](x, u)
k1.append(k1_new)
i += 1
hk1 = [z*h2 for z in k1]
u_temp = [z+i for z, i in zip(hk1, u)]
#Find k2 at x for all ODEs
#k2 = f(x+(h/2),u+(1/2)*k1)
i = 0
while i < M:
k2_new = odes[i](x+h2,u_temp)
k2.append(k2_new)
i += 1
#Multiply all k values by h
hk2 = [z*h2 for z in k2]
        #Make new incremental u value
u_temp = [z+i for z, i in zip(hk2, u)]
#Find k3 at x for all ODEs
#k3 = f(x+(h/2),u+(1/2)*k2)
i = 0
while i < M:
            k3_new = odes[i](x+h2, u_temp)
            k3.append(k3_new)
            i += 1
        hk3 = [z*h for z in k3]
        u_temp = [z+i for z, i in zip(hk3, u)]
        #Find k4 at x for all ODEs
        #k4 = f(x+h,u+h*k3)
        i = 0
        while i < M:
            k4_new = odes[i](x+h, u_temp)
k4.append(k4_new)
i += 1
k_bar = [(a+2*(b+c)+d)/6 for a,b,c,d in zip(k1,k2,k3,k4)]
#Find kbar and then solve u values for all ODEs
#kbar = (k1+k2*2+k3*2+k4)/6
#u = u + h*kbar
i = 0
while i < M:
u_i = u[i] +(h*k_bar[i])
u[i] = u_i
output.append(u_i)
i += 1
soln.append(output)
return soln
#-----------------------------------------------------------
'''
def np_eq_7(step,Vnmh):
V = Vnmh[0]
n = Vnmh[1]
alpha_n = .01*(V+10)/(np.exp((V+10)/(10))-1)
beta_n = .125*np.exp(V/80)
n_prime = alpha_n * (1 - n) - beta_n * n
return n_prime
def mp_eq_15(step, Vnmh):
V = step
m = Vnmh[2]
V = Vnmh[0]
alpha_m = 0.1*(V+25)/(np.exp((V+25)/10)-1)
beta_m = 4*np.exp(V/18)
m_prime = alpha_m * (1 - m) - beta_m * m
return m_prime
def hp_eq_16(step, Vnmh):
V = Vnmh[0]
h = Vnmh[3]
alpha_h = .07*np.exp(V/20)
beta_h = 1/(np.exp((V+30)/10)+1)
h_prime = alpha_h * (1 - h) - beta_h * h
return h_prime
def vmp_eq_26(step, Vnmh):
gna_bar = 120 #m.mho/cm^3
gk_bar = 36 #m.mho/cm^3
gl_bar = 0.3 #m.mho/cm^3
Vk = 12 #mV
Vna = -115 #mV
Vl = -10.613 #mV
Cm = 1 #uF/cm^3
V = Vnmh[0]
n = Vnmh[1]
m = Vnmh[2]
h = Vnmh[3]
gna = (np.power(m,3))*h*gna_bar
gk = (np.power(n,4))*gk_bar
Ina = gna*(V-Vna)
Ik = gk*(V-Vk)
Il = gl_bar*(V-Vl)
Im = Ina+Ik+Il
vm_prime = (Im - (gk_bar*(n**4)*(V-Vk) + gna_bar*(m**3)*h*(V-Vna) + gl_bar*(V-Vl))) / Cm
return vm_prime
def ode_u1(x,u):
u1_prime = u[1]
return u1_prime
def ode_u2(x,u):
u2_prime = -np.sin(x)
return u2_prime
#------------------------------------------------------------
start = 1
stop = np.pi*5
N = 100
h = (stop-start)/(N-1)
x_hw = list(irange(start,stop,h))
u1 = 0
u2 = 0
u3 = 56
u4 = 5
rk_ans = rk([ode_u1,ode_u2], x_hw, h, [u1,u2])
ans = [row[0] for row in rk_ans]
#y = [m.sin(step) for step in x_hw]
t = x_hw
plt.plot(x_hw, ans, 'red')
plt.show()
'''
|
mit
|
ricknoelle/todo-craft
|
palette.py
|
1
|
9392
|
#!/usr/bin/env python3
#
# Urwid Palette Test. Showing off highcolor support
# Copyright (C) 2004-2009 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
"""
Palette test. Shows the available foreground and background settings
in monochrome, 16 color, 88 color and 256 color modes.
"""
import re
import sys
import urwid
import urwid.raw_display
CHART_256 = """
brown__ dark_red_ dark_magenta_ dark_blue_ dark_cyan_ dark_green_
yellow_ light_red light_magenta light_blue light_cyan light_green
#00f#06f#08f#0af#0df#0ff black_______ dark_gray___
#60f#00d#06d#08d#0ad#0dd#0fd light_gray__ white_______
#80f#60d#00a#06a#08a#0aa#0da#0fa
#a0f#80d#60a#008#068#088#0a8#0d8#0f8
#d0f#a0d#80d#608#006#066#086#0a6#0d6#0f6
#f0f#d0d#a0a#808#606#000#060#080#0a0#0d0#0f0#0f6#0f8#0fa#0fd#0ff
#f0d#d0a#a08#806#600#660#680#6a0#6d0#6f0#6f6#6f8#6fa#6fd#6ff#0df
#f0a#d08#a06#800#860#880#8a0#8d0#8f0#8f6#8f8#8fa#8fd#8ff#6df#0af
#f08#d06#a00#a60#a80#aa0#ad0#af0#af6#af8#afa#afd#aff#8df#6af#08f
#f06#d00#d60#d80#da0#dd0#df0#df6#df8#dfa#dfd#dff#adf#8af#68f#06f
#f00#f60#f80#fa0#fd0#ff0#ff6#ff8#ffa#ffd#fff#ddf#aaf#88f#66f#00f
#fd0#fd6#fd8#fda#fdd#fdf#daf#a8f#86f#60f
#66d#68d#6ad#6dd #fa0#fa6#fa8#faa#fad#faf#d8f#a6f#80f
#86d#66a#68a#6aa#6da #f80#f86#f88#f8a#f8d#f8f#d6f#a0f
#a6d#86a#668#688#6a8#6d8 #f60#f66#f68#f6a#f6d#f6f#d0f
#d6d#a6a#868#666#686#6a6#6d6#6d8#6da#6dd #f00#f06#f08#f0a#f0d#f0f
#d6a#a68#866#886#8a6#8d6#8d8#8da#8dd#6ad
#d68#a66#a86#aa6#ad6#ad8#ada#add#8ad#68d
#d66#d86#da6#dd6#dd8#dda#ddd#aad#88d#66d g78_g82_g85_g89_g93_g100
#da6#da8#daa#dad#a8d#86d g52_g58_g62_g66_g70_g74_
#88a#8aa #d86#d88#d8a#d8d#a6d g27_g31_g35_g38_g42_g46_g50_
#a8a#888#8a8#8aa #d66#d68#d6a#d6d g0__g3__g7__g11_g15_g19_g23_
#a88#aa8#aaa#88a
#a88#a8a
"""
CHART_88 = """
brown__ dark_red_ dark_magenta_ dark_blue_ dark_cyan_ dark_green_
yellow_ light_red light_magenta light_blue light_cyan light_green
#00f#08f#0cf#0ff black_______ dark_gray___
#80f#00c#08c#0cc#0fc light_gray__ white_______
#c0f#80c#008#088#0c8#0f8
#f0f#c0c#808#000#080#0c0#0f0#0f8#0fc#0ff #88c#8cc
#f0c#c08#800#880#8c0#8f0#8f8#8fc#8ff#0cf #c8c#888#8c8#8cc
#f08#c00#c80#cc0#cf0#cf8#cfc#cff#8cf#08f #c88#cc8#ccc#88c
#f00#f80#fc0#ff0#ff8#ffc#fff#ccf#88f#00f #c88#c8c
#fc0#fc8#fcc#fcf#c8f#80f
#f80#f88#f8c#f8f#c0f g62_g74_g82_g89_g100
#f00#f08#f0c#f0f g0__g19_g35_g46_g52
"""
CHART_16 = """
brown__ dark_red_ dark_magenta_ dark_blue_ dark_cyan_ dark_green_
yellow_ light_red light_magenta light_blue light_cyan light_green
black_______ dark_gray___ light_gray__ white_______
"""
ATTR_RE = re.compile("(?P<whitespace>[ \n]*)(?P<entry>[^ \n]+)")
SHORT_ATTR = 4 # length of short high-colour descriptions which may
# be packed one after the next
def parse_chart(chart, convert):
"""
Convert string chart into text markup with the correct attributes.
chart -- palette chart as a string
convert -- function that converts a single palette entry to an
(attr, text) tuple, or None if no match is found
"""
out = []
for match in re.finditer(ATTR_RE, chart):
if match.group('whitespace'):
out.append(match.group('whitespace'))
entry = match.group('entry')
entry = entry.replace("_", " ")
while entry:
# try the first four characters
attrtext = convert(entry[:SHORT_ATTR])
if attrtext:
elen = SHORT_ATTR
entry = entry[SHORT_ATTR:].strip()
else: # try the whole thing
attrtext = convert(entry.strip())
assert attrtext, "Invalid palette entry: %r" % entry
elen = len(entry)
entry = ""
attr, text = attrtext
out.append((attr, text.ljust(elen)))
return out
def foreground_chart(chart, background, colors):
"""
Create text markup for a foreground colour chart
chart -- palette chart as string
background -- colour to use for background of chart
colors -- number of colors (88 or 256)
"""
def convert_foreground(entry):
try:
attr = urwid.AttrSpec(entry, background, colors)
except urwid.AttrSpecError:
return None
return attr, entry
return parse_chart(chart, convert_foreground)
def background_chart(chart, foreground, colors):
"""
Create text markup for a background colour chart
chart -- palette chart as string
foreground -- colour to use for foreground of chart
colors -- number of colors (88 or 256)
This will remap 8 <= colour < 16 to high-colour versions
in the hopes of greater compatibility
"""
def convert_background(entry):
try:
attr = urwid.AttrSpec(foreground, entry, colors)
except urwid.AttrSpecError:
return None
# fix 8 <= colour < 16
if colors > 16 and attr.background_basic and \
attr.background_number >= 8:
# use high-colour with same number
entry = 'h%d'%attr.background_number
attr = urwid.AttrSpec(foreground, entry, colors)
return attr, entry
return parse_chart(chart, convert_background)
def main():
palette = [
('header', 'black,underline', 'light gray', 'standout,underline',
'black,underline', '#88a'),
('panel', 'light gray', 'dark blue', '',
'#ffd', '#00a'),
('focus', 'light gray', 'dark cyan', 'standout',
'#ff8', '#806'),
]
screen = urwid.raw_display.Screen()
screen.register_palette(palette)
lb = urwid.SimpleListWalker([])
chart_offset = None # offset of chart in lb list
mode_radio_buttons = []
chart_radio_buttons = []
def fcs(widget):
# wrap widgets that can take focus
return urwid.AttrMap(widget, None, 'focus')
def set_mode(colors, is_foreground_chart):
# set terminal mode and redraw chart
screen.set_terminal_properties(colors)
screen.reset_default_terminal_palette()
chart_fn = (background_chart, foreground_chart)[is_foreground_chart]
if colors == 1:
lb[chart_offset] = urwid.Divider()
else:
chart = {16: CHART_16, 88: CHART_88, 256: CHART_256}[colors]
txt = chart_fn(chart, 'default', colors)
lb[chart_offset] = urwid.Text(txt, wrap='clip')
def on_mode_change(rb, state, colors):
# if this radio button is checked
if state:
is_foreground_chart = chart_radio_buttons[0].state
set_mode(colors, is_foreground_chart)
def mode_rb(text, colors, state=False):
# mode radio buttons
rb = urwid.RadioButton(mode_radio_buttons, text, state)
urwid.connect_signal(rb, 'change', on_mode_change, colors)
return fcs(rb)
def on_chart_change(rb, state):
# handle foreground check box state change
set_mode(screen.colors, state)
def click_exit(button):
raise urwid.ExitMainLoop()
lb.extend([
urwid.AttrMap(urwid.Text("Urwid Palette Test"), 'header'),
urwid.AttrMap(urwid.Columns([
urwid.Pile([
mode_rb("Monochrome", 1),
mode_rb("16-Color", 16, True),
mode_rb("88-Color", 88),
mode_rb("256-Color", 256),]),
urwid.Pile([
fcs(urwid.RadioButton(chart_radio_buttons,
"Foreground Colors", True, on_chart_change)),
fcs(urwid.RadioButton(chart_radio_buttons,
"Background Colors")),
urwid.Divider(),
fcs(urwid.Button("Exit", click_exit)),
]),
]),'panel')
])
chart_offset = len(lb)
lb.extend([
urwid.Divider() # placeholder for the chart
])
set_mode(16, True) # displays the chart
def unhandled_input(key):
if key in ('Q','q','esc'):
raise urwid.ExitMainLoop()
urwid.MainLoop(urwid.ListBox(lb), screen=screen,
unhandled_input=unhandled_input).run()
if __name__ == "__main__":
main()
|
mit
|
Lektorium-LLC/edx-platform
|
common/djangoapps/third_party_auth/settings.py
|
3
|
4344
|
"""Settings for the third-party auth module.
The flow for settings registration is:
The base settings file contains a boolean, ENABLE_THIRD_PARTY_AUTH, indicating
whether this module is enabled. startup.py probes ENABLE_THIRD_PARTY_AUTH.
If true, it:
a) loads this module.
b) calls apply_settings(), passing in the Django settings
"""
_FIELDS_STORED_IN_SESSION = ['auth_entry', 'next']
_MIDDLEWARE_CLASSES = (
'third_party_auth.middleware.ExceptionMiddleware',
)
_SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/dashboard'
_SOCIAL_AUTH_AZUREAD_OAUTH2_AUTH_EXTRA_ARGUMENTS = {
'msafed': 0
}
def apply_settings(django_settings):
"""Set provider-independent settings."""
    # Whitelisted URL query parameters retained in the pipeline session.
# Params not in this whitelist will be silently dropped.
django_settings.FIELDS_STORED_IN_SESSION = _FIELDS_STORED_IN_SESSION
# Inject exception middleware to make redirects fire.
django_settings.MIDDLEWARE_CLASSES += _MIDDLEWARE_CLASSES
# Where to send the user if there's an error during social authentication
# and we cannot send them to a more specific URL
# (see middleware.ExceptionMiddleware).
django_settings.SOCIAL_AUTH_LOGIN_ERROR_URL = '/'
# Where to send the user once social authentication is successful.
django_settings.SOCIAL_AUTH_LOGIN_REDIRECT_URL = _SOCIAL_AUTH_LOGIN_REDIRECT_URL
    # Add an extra key/value pair to the URL query string for Microsoft, as requested
django_settings.SOCIAL_AUTH_AZUREAD_OAUTH2_AUTH_EXTRA_ARGUMENTS = _SOCIAL_AUTH_AZUREAD_OAUTH2_AUTH_EXTRA_ARGUMENTS
# Inject our customized auth pipeline. All auth backends must work with
# this pipeline.
django_settings.SOCIAL_AUTH_PIPELINE = [
'third_party_auth.pipeline.parse_query_params',
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'third_party_auth.pipeline.associate_by_email_if_login_api',
'social_core.pipeline.user.get_username',
'third_party_auth.pipeline.set_pipeline_timeout',
'third_party_auth.pipeline.ensure_user_information',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
'third_party_auth.pipeline.set_logged_in_cookies',
'third_party_auth.pipeline.login_analytics',
]
# Required so that we can use unmodified PSA OAuth2 backends:
django_settings.SOCIAL_AUTH_STRATEGY = 'third_party_auth.strategy.ConfigurationModelStrategy'
# We let the user specify their email address during signup.
django_settings.SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email']
# Disable exceptions by default for prod so you get redirect behavior
# instead of a Django error page. During development you may want to
# enable this when you want to get stack traces rather than redirections.
django_settings.SOCIAL_AUTH_RAISE_EXCEPTIONS = False
# Allow users to login using social auth even if their account is not verified yet
# This is required since we [ab]use django's 'is_active' flag to indicate verified
# accounts; without this set to True, python-social-auth won't allow us to link the
# user's account to the third party account during registration (since the user is
# not verified at that point).
# We also generally allow unverified third party auth users to login (see the logic
# in ensure_user_information in pipeline.py) because otherwise users who use social
# auth to register with an invalid email address can become "stuck".
# TODO: Remove the following if/when email validation is separated from the is_active flag.
django_settings.SOCIAL_AUTH_INACTIVE_USER_LOGIN = True
django_settings.SOCIAL_AUTH_INACTIVE_USER_URL = '/auth/inactive'
# Context processors required under Django.
django_settings.SOCIAL_AUTH_UUID_LENGTH = 4
django_settings.DEFAULT_TEMPLATE_ENGINE['OPTIONS']['context_processors'] += (
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
)
|
agpl-3.0
|
srjoglekar246/sympy
|
sympy/logic/tests/test_inference.py
|
2
|
6293
|
"""For more tests on satisfiability, see test_dimacs"""
from sympy import symbols
from sympy.logic.boolalg import Equivalent, Implies
from sympy.logic.inference import pl_true, satisfiable, PropKB
from sympy.logic.algorithms.dpll import dpll, dpll_satisfiable, \
find_pure_symbol, find_unit_clause, unit_propagate, \
find_pure_symbol_int_repr, find_unit_clause_int_repr, \
unit_propagate_int_repr
from sympy.utilities.pytest import raises
def test_find_pure_symbol():
A, B, C = symbols('A,B,C')
assert find_pure_symbol([A], [A]) == (A, True)
assert find_pure_symbol([A, B], [~A | B, ~B | A]) == (None, None)
assert find_pure_symbol([A, B, C], [ A | ~B, ~B | ~C, C | A]) == (A, True)
assert find_pure_symbol([A, B, C], [~A | B, B | ~C, C | A]) == (B, True)
assert find_pure_symbol([A, B, C], [~A | ~B, ~B | ~C, C | A]) == (B, False)
assert find_pure_symbol([A, B, C], [~A | B, ~B | ~C, C | A]) == (None, None)
def test_find_pure_symbol_int_repr():
assert find_pure_symbol_int_repr([1], [set([1])]) == (1, True)
assert find_pure_symbol_int_repr([1, 2],
[set([-1, 2]), set([-2, 1])]) == (None, None)
assert find_pure_symbol_int_repr([1, 2, 3],
[set([1, -2]), set([-2, -3]), set([3, 1])]) == (1, True)
assert find_pure_symbol_int_repr([1, 2, 3],
[set([-1, 2]), set([2, -3]), set([3, 1])]) == (2, True)
assert find_pure_symbol_int_repr([1, 2, 3],
[set([-1, -2]), set([-2, -3]), set([3, 1])]) == (2, False)
assert find_pure_symbol_int_repr([1, 2, 3],
[set([-1, 2]), set([-2, -3]), set([3, 1])]) == (None, None)
def test_unit_clause():
A, B, C = symbols('A,B,C')
assert find_unit_clause([A], {}) == (A, True)
assert find_unit_clause([A, ~A], {}) == (A, True) ### Wrong ??
assert find_unit_clause([A | B], {A: True}) == (B, True)
assert find_unit_clause([A | B], {B: True}) == (A, True)
assert find_unit_clause([A | B | C, B | ~C, A | ~B], {A:True}) == (B, False)
assert find_unit_clause([A | B | C, B | ~C, A | B], {A:True}) == (B, True)
assert find_unit_clause([A | B | C, B | ~C, A ], {}) == (A, True)
def test_unit_clause_int_repr():
assert find_unit_clause_int_repr(map(set, [[1]]), {}) == (1, True)
assert find_unit_clause_int_repr(map(set, [[1], [-1]]), {}) == (1, True)
assert find_unit_clause_int_repr([set([1,2])], {1: True}) == (2, True)
assert find_unit_clause_int_repr([set([1,2])], {2: True}) == (1, True)
assert find_unit_clause_int_repr(map(set, [[1,2,3], [2, -3], [1, -2]]), {1: True}) == \
(2, False)
assert find_unit_clause_int_repr(map(set, [[1, 2, 3], [3, -3], [1, 2]]), {1: True}) == \
(2, True)
A,B,C = symbols('A,B,C')
assert find_unit_clause([A | B | C, B | ~C, A ], {}) == (A, True)
def test_unit_propagate():
A, B, C = symbols('A,B,C')
assert unit_propagate([A | B], A) == []
assert unit_propagate([A | B, ~A | C, ~C | B, A], A) == [C, ~C | B, A]
def test_unit_propagate_int_repr():
assert unit_propagate_int_repr([set([1, 2])], 1) == []
assert unit_propagate_int_repr(map(set, [[1, 2], [-1, 3], [-3, 2], [1]]), 1) == \
[set([3]), set([-3, 2])]
def test_dpll():
"""This is also tested in test_dimacs"""
A, B, C = symbols('A,B,C')
assert dpll([A | B], [A, B], {A: True, B: True}) == {A: True, B: True}
def test_dpll_satisfiable():
A, B, C = symbols('A,B,C')
assert dpll_satisfiable( A & ~A ) == False
assert dpll_satisfiable( A & ~B ) == {A: True, B: False}
assert dpll_satisfiable( A | B ) in ({A: True}, {B: True}, {A: True, B: True})
assert dpll_satisfiable( (~A | B) & (~B | A) ) in ({A: True, B: True}, {A: False, B:False})
assert dpll_satisfiable( (A | B) & (~B | C) ) in ({A: True, B: False},
{A: True, C:True}, {B: True, C: True})
assert dpll_satisfiable( A & B & C ) == {A: True, B: True, C: True}
assert dpll_satisfiable( (A | B) & (A >> B) ) == {B: True}
assert dpll_satisfiable( Equivalent(A, B) & A ) == {A: True, B: True}
assert dpll_satisfiable( Equivalent(A, B) & ~A ) == {A: False, B: False}
def test_satisfiable():
A, B, C = symbols('A,B,C')
assert satisfiable(A & (A >> B) & ~B) == False
def test_pl_true():
A, B, C = symbols('A,B,C')
assert pl_true(True) == True
assert pl_true( A & B, {A : True, B : True}) == True
assert pl_true( A | B, {A : True}) == True
assert pl_true( A | B, {B : True}) == True
assert pl_true( A | B, {A: None, B: True}) == True
assert pl_true( A >> B, {A: False}) == True
assert pl_true( A | B | ~C, {A: False, B: True, C: True}) == True
assert pl_true(Equivalent(A, B), {A:False, B:False}) == True
# test for false
assert pl_true(False) == False
assert pl_true( A & B, {A: False, B: False}) == False
assert pl_true( A & B, {A: False}) == False
assert pl_true( A & B, {B: False}) == False
assert pl_true( A | B, {A: False, B: False}) == False
#test for None
assert pl_true(B, {B: None}) is None
assert pl_true( A & B, {A: True, B: None}) is None
assert pl_true( A >> B, {A: True, B: None}) is None
assert pl_true(Equivalent(A, B), {A:None}) is None
assert pl_true(Equivalent(A, B), {A:True, B:None}) is None
def test_pl_true_wrong_input():
from sympy import pi
raises(ValueError, lambda: pl_true('John Cleese'))
raises(ValueError, lambda: pl_true(42+pi+pi**2))
raises(ValueError, lambda: pl_true(42))
def test_PropKB():
A, B, C = symbols('A,B,C')
kb = PropKB()
kb.tell(A >> B)
kb.tell(B >> C)
assert kb.ask(A) == True
assert kb.ask(B) == True
assert kb.ask(C) == True
assert kb.ask(~A) == True
assert kb.ask(~B) == True
assert kb.ask(~C) == True
kb.tell(A)
assert kb.ask(A) == True
assert kb.ask(B) == True
assert kb.ask(C) == True
assert kb.ask(~C) == False
kb.retract(A)
assert kb.ask(~C) == True
kb2 = PropKB(Equivalent(A, B))
assert kb2.ask(A) == True
assert kb2.ask(B) == True
kb2.tell(A)
assert kb2.ask(A) == True
kb3 = PropKB()
kb3.tell(A)
def test_propKB_tolerant():
""""tolerant to bad input"""
kb = PropKB()
A, B, C = symbols('A,B,C')
assert kb.ask(B) == False
|
bsd-3-clause
|
michaelchu/kaleidoscope
|
kaleidoscope/options/option_series.py
|
1
|
3755
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import cm
from kaleidoscope.options.iterator.option_chain import OptionChainIterator
pd.set_option('display.expand_frame_repr', False)
pd.set_option('display.max_rows', None)
class OptionSeries(object):
"""
This class contains the time series data for an option strategy.
"""
def __init__(self, symbol, strategy, data, index=None, dropna=False, **params):
"""
Initialize this class with a dataframe of option strategy prices by
symbol, quote date, expiration, mark, other metrics, etc
This class will then store the data in a dictionary by expiration dates
and provide methods that will act on this data.
:param symbol: symbol of option chains contained in this object
:param data: Dataframe containing the time series data of an option strategy.
This dataframe must contain the following columns:
symbol, quote_date, expiration, mark
:param index: A list containing the index position for the 4 columns listed above,
if the columns are not listed in that order in the DataFrame.
If None, this function will infer the columns by the default order
:param dropna: Drop all rows containing NaN in OptionSeries
:param params: Parameters used to construct the spread data for this OptionSeries
"""
# TODO: check index param's length is equal to 3
if not isinstance(data, pd.DataFrame):
raise ValueError('data param must be of pandas type DataFrame')
elif len(data.columns) < 3:
raise ValueError('Dataframe must contain at least 3 columns')
elif index is not None and len(index) != 3:
raise ValueError('index length must be 3')
else:
self.option_chains = {}
self.symbol = symbol
self.strategy = strategy
self.data = data
self.params = params
self.index = index
def iter_quotes(self):
"""
Return an iterator to iterate through all option chains in the backtesting period
:return:
"""
return OptionChainIterator(self.data)
def head(self, n=5):
"""
        Print the option data for the first n expiry dates
        :param n: The number of expiry dates' worth of data to print, starting from the first
:return: None
"""
for series in sorted(self.option_chains)[:n]:
print(self.option_chains[series])
def tail(self, n=5):
"""
        Print the option data for the last n expiry dates
        :param n: The number of expiry dates' worth of data to print, counting back from the last
:return: None
"""
for series in sorted(self.option_chains)[-n:]:
print(self.option_chains[series])
def plot(self, exp):
"""
Plot this OptionSeries with a surface plot for an expiration cycle.
:param exp: The expiration to plot for
:return:
"""
data = self.option_chains[exp]
# reset dataframe labels and column names to be numeric
data.columns = [i for i in range(data.shape[1])]
data.reset_index(inplace=True)
# drop either symbol or spread_symbol columns depending on strategy
data.drop('symbol' if 'spread_symbol' not in data else 'spread_symbol', axis=1, inplace=True)
x = data.columns
y = data.index
X, Y = np.meshgrid(x, y)
Z = data
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, linewidth=0)
plt.show()
|
mit
|
craig8196/ai
|
bzagents/craig_dumb_agent.py
|
1
|
6429
|
#!/usr/bin/python -tt
# An incredibly simple agent. All we do is find the closest enemy tank, drive
# towards it, and shoot. Note that if friendly fire is allowed, you will very
# often kill your own tanks with this code.
#################################################################
# NOTE TO STUDENTS
# This is a starting point for you. You will need to greatly
# modify this code if you want to do anything useful. But this
# should help you to know how to interact with BZRC in order to
# get the information you need.
#
# After starting the bzrflag server, this is one way to start
# this code:
# python agent0.py [hostname] [port]
#
# Often this translates to something like the following (with the
# port name being printed out by the bzrflag server):
# python agent0.py localhost 49857
#################################################################
from __future__ import division
import sys
import math
import time
import random
from bzrc import BZRC, Command
class Agent(object):
"""Class handles all command and control logic for a teams tanks."""
def __init__(self, bzrc):
self.bzrc = bzrc
self.constants = self.bzrc.get_constants()
self.commands = []
print self.constants
self.tanks = {}
def tick(self, time_diff):
"""Some time has passed; decide what to do next."""
mytanks, othertanks, flags, shots = self.bzrc.get_lots_o_stuff()
self.mytanks = mytanks
self.othertanks = othertanks
self.flags = flags
self.shots = shots
self.enemies = [tank for tank in othertanks if tank.color !=
self.constants['team']]
self.commands = []
for tank in mytanks:
if not tank.index in self.tanks:
self.tanks[tank.index] = {
'speed': 1,
'start_turn': False,
'start_forward': 0,
'forward_len': random.uniform(3, 8),
'prev_shot': 0,
'shot_len': 0,
'shoot': False,
'angvel': 0,
'target_angle': tank.angle,
'turning': False,
}
self.act_dumb(tank, time_diff)
results = self.bzrc.do_commands(self.commands)
self.prev_time = time_diff
def act_dumb(self, tank, time_diff):
"""Make the given tank act dumb."""
data = self.tanks[tank.index]
if time_diff - data['start_forward'] > data['forward_len']:
data['speed'] = 0
data['start_turn'] = True
data['start_forward'] = time_diff
sixty_deg_in_radians = 60/180*math.pi
if data['start_turn']:
l_or_r = random.randint(0, 1)
if l_or_r == 0:
direction = -1
else:
direction = 1
data['start_angle'] = self.normalize_angle(tank.angle)
data['target_angle'] = self.normalize_angle(tank.angle + direction*sixty_deg_in_radians)
data['start_turn'] = False
data['angvel'] = direction*1.0
data['turning'] = True
if data['turning']:
if self.min_angle_between(data['target_angle'], tank.angle) > sixty_deg_in_radians:
data['turning'] = False
data['angvel'] = 0
data['forward_len'] = random.uniform(3, 8)
data['start_forward'] = time_diff
data['speed'] = 1
#~ print self.min_angle_between(data['target_angle'], tank.angle)
if time_diff - data['prev_shot'] > data['shot_len']:
data['shoot'] = True
data['prev_shot'] = time_diff
data['shot_len'] = random.uniform(1.5, 2.5)
command = Command(tank.index, data['speed'], data['angvel'], data['shoot'])
self.commands.append(command)
def attack_enemies(self, tank):
"""Find the closest enemy and chase it, shooting as you go."""
best_enemy = None
best_dist = 2 * float(self.constants['worldsize'])
for enemy in self.enemies:
if enemy.status != 'alive':
continue
dist = math.sqrt((enemy.x - tank.x)**2 + (enemy.y - tank.y)**2)
if dist < best_dist:
best_dist = dist
best_enemy = enemy
if best_enemy is None:
command = Command(tank.index, 0, 0, False)
self.commands.append(command)
else:
self.move_to_position(tank, best_enemy.x, best_enemy.y)
def move_to_position(self, tank, target_x, target_y):
"""Set command to move to given coordinates."""
target_angle = math.atan2(target_y - tank.y,
target_x - tank.x)
relative_angle = self.normalize_angle(target_angle - tank.angle)
command = Command(tank.index, 1, 2 * relative_angle, True)
self.commands.append(command)
def min_angle_between(self, angle1, angle2):
"""Inputs must be normalized.
Return the minimal positive angle between the given angles.
"""
diff = math.fabs(angle1 - angle2)
if diff > math.pi:
diff = 2*math.pi - diff
return diff
def normalize_angle(self, angle):
"""Make any angle be between +/- pi."""
angle -= 2 * math.pi * int (angle / (2 * math.pi))
if angle <= -math.pi:
angle += 2 * math.pi
elif angle > math.pi:
angle -= 2 * math.pi
return angle
def main():
# Process CLI arguments.
try:
execname, host, port = sys.argv
except ValueError:
execname = sys.argv[0]
print >>sys.stderr, '%s: incorrect number of arguments' % execname
print >>sys.stderr, 'usage: %s hostname port' % sys.argv[0]
sys.exit(-1)
# Connect.
#bzrc = BZRC(host, int(port), debug=True)
bzrc = BZRC(host, int(port))
agent = Agent(bzrc)
prev_time = time.time()
# Run the agent
try:
while True:
time_diff = time.time() - prev_time
agent.tick(time_diff)
except KeyboardInterrupt:
print "Exiting due to keyboard interrupt."
bzrc.close()
if __name__ == '__main__':
main()
# vim: et sw=4 sts=4
|
gpl-3.0
|
xflows/clowdflows
|
tweepy2/cursor.py
|
2
|
4028
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from tweepy2.error import TweepError
class Cursor(object):
"""Pagination helper class"""
def __init__(self, method, *args, **kargs):
if hasattr(method, 'pagination_mode'):
if method.pagination_mode == 'cursor':
self.iterator = CursorIterator(method, args, kargs)
else:
self.iterator = PageIterator(method, args, kargs)
else:
raise TweepError('This method does not perform pagination')
def pages(self, limit=0):
"""Return iterator for pages"""
if limit > 0:
self.iterator.limit = limit
return self.iterator
def items(self, limit=0):
"""Return iterator for items in each page"""
i = ItemIterator(self.iterator)
i.limit = limit
return i
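# Illustrative usage (a sketch; assumes an authenticated tweepy2 API object
# whose bound methods declare a pagination_mode):
#
#   for page in Cursor(api.user_timeline, id="example").pages(3):
#       process(page)
#   for follower in Cursor(api.followers).items(50):
#       process(follower)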
class BaseIterator(object):
def __init__(self, method, args, kargs):
self.method = method
self.args = args
self.kargs = kargs
self.limit = 0
def next(self):
raise NotImplementedError
def prev(self):
raise NotImplementedError
def __iter__(self):
return self
class CursorIterator(BaseIterator):
def __init__(self, method, args, kargs):
BaseIterator.__init__(self, method, args, kargs)
self.next_cursor = -1
self.prev_cursor = 0
self.count = 0
def next(self):
if self.next_cursor == 0 or (self.limit and self.count == self.limit):
raise StopIteration
data, cursors = self.method(
cursor=self.next_cursor, *self.args, **self.kargs
)
self.prev_cursor, self.next_cursor = cursors
if len(data) == 0:
raise StopIteration
self.count += 1
return data
def prev(self):
if self.prev_cursor == 0:
raise TweepError('Can not page back more, at first page')
data, self.next_cursor, self.prev_cursor = self.method(
cursor=self.prev_cursor, *self.args, **self.kargs
)
self.count -= 1
return data
class PageIterator(BaseIterator):
def __init__(self, method, args, kargs):
BaseIterator.__init__(self, method, args, kargs)
self.current_page = 0
def next(self):
self.current_page += 1
items = self.method(page=self.current_page, *self.args, **self.kargs)
if len(items) == 0 or (self.limit > 0 and self.current_page > self.limit):
raise StopIteration
return items
def prev(self):
if (self.current_page == 1):
raise TweepError('Can not page back more, at first page')
self.current_page -= 1
return self.method(page=self.current_page, *self.args, **self.kargs)
class ItemIterator(BaseIterator):
def __init__(self, page_iterator):
self.page_iterator = page_iterator
self.limit = 0
self.current_page = None
self.page_index = -1
self.count = 0
def next(self):
if self.limit > 0 and self.count == self.limit:
raise StopIteration
if self.current_page is None or self.page_index == len(self.current_page) - 1:
# Reached end of current page, get the next page...
self.current_page = self.page_iterator.next()
self.page_index = -1
self.page_index += 1
self.count += 1
return self.current_page[self.page_index]
def prev(self):
if self.current_page is None:
raise TweepError('Can not go back more, at first page')
if self.page_index == 0:
            # At the beginning of the current page, move to the previous page...
self.current_page = self.page_iterator.prev()
self.page_index = len(self.current_page)
if self.page_index == 0:
raise TweepError('No more items')
self.page_index -= 1
self.count -= 1
return self.current_page[self.page_index]
|
mit
|