repo_name | path | copies | size | content | license
---|---|---|---|---|---|
ma1co/Sony-PMCA-RE | pmca/spk/__init__.py | 1 | 2713 | """Methods for reading and writing spk files"""
import sys
try:
from Cryptodome.Cipher import AES
from Cryptodome.PublicKey import RSA
from Cryptodome.Util.number import bytes_to_long, long_to_bytes
except ImportError:
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
from Crypto.Util.number import bytes_to_long, long_to_bytes
if sys.version_info >= (3,):
long = int
from . import constants
from . import util
from ..util import *
SpkHeader = Struct('SpkHeader', [
('magic', Struct.STR % 4),
('keyOffset', Struct.INT32),
])
spkHeaderMagic = b'1spk'
SpkKeyHeader = Struct('SpkKeyHeader', [
('keySize', Struct.INT32),
])
def parse(data):
"""Parses an spk file
Returns:
The contained apk data
"""
encryptedKey, encryptedData = parseContainer(data)
key = decryptKey(encryptedKey)
return decryptData(key, encryptedData)
def dump(data):
"""Builds an spk file containing the apk data specified"""
encryptedKey = constants.sampleSpkKey
key = decryptKey(encryptedKey)
encryptedData = encryptData(key, data)
return dumpContainer(encryptedKey, encryptedData)
def isSpk(data):
return len(data) >= SpkHeader.size and SpkHeader.unpack(data).magic == spkHeaderMagic
def parseContainer(data):
"""Parses an spk file
Returns:
('encrypted key', 'encrypted apk data')
"""
header = SpkHeader.unpack(data)
if header.magic != spkHeaderMagic:
raise Exception('Wrong magic')
keyHeaderOffset = SpkHeader.size + header.keyOffset
keyHeader = SpkKeyHeader.unpack(data, keyHeaderOffset)
keyOffset = keyHeaderOffset + SpkKeyHeader.size
dataOffset = keyOffset + keyHeader.keySize
return data[keyOffset:dataOffset], data[dataOffset:]
def dumpContainer(encryptedKey, encryptedData):
"""Builds an spk file from the encrypted key and data specified"""
return SpkHeader.pack(magic=spkHeaderMagic, keyOffset=0) + SpkKeyHeader.pack(keySize=len(encryptedKey)) + encryptedKey + encryptedData
def decryptKey(encryptedKey):
"""Decrypts an RSA-encrypted key"""
rsa = RSA.construct((long(constants.rsaModulus), long(constants.rsaExponent)))
try:
return rsa.encrypt(encryptedKey, 0)[0]
except NotImplementedError:
# pycryptodome
return long_to_bytes(rsa._encrypt(bytes_to_long(encryptedKey)))
def decryptData(key, encryptedData):
"""Decrypts the apk data using the specified AES key"""
aes = AES.new(key, AES.MODE_ECB)
return b''.join(util.unpad(aes.decrypt(c)) for c in util.chunk(encryptedData, constants.blockSize + constants.paddingSize))
def encryptData(key, data):
"""Encrypts the apk data using the specified AES key"""
aes = AES.new(key, AES.MODE_ECB)
return b''.join(aes.encrypt(util.pad(c, constants.paddingSize)) for c in util.chunk(data, constants.blockSize))
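# Hedged usage sketch (not part of the original module): round-trip apk
# bytes through the spk container. The file name is illustrative only.
# with open('app.apk', 'rb') as f:
#     apkData = f.read()
# spkData = dump(apkData)
# assert isSpk(spkData)
# assert parse(spkData) == apkData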
| mit |
chinmaygarde/depot_tools | third_party/boto/services/service.py | 70 | 6641 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.services.message import ServiceMessage
from boto.services.servicedef import ServiceDef
from boto.pyami.scriptbase import ScriptBase
from boto.utils import get_ts
import time
import os
import mimetypes
class Service(ScriptBase):
# Time required to process a transaction
ProcessingTime = 60
def __init__(self, config_file=None, mimetype_files=None):
ScriptBase.__init__(self, config_file)
self.name = self.__class__.__name__
self.working_dir = boto.config.get('Pyami', 'working_dir')
self.sd = ServiceDef(config_file)
self.retry_count = self.sd.getint('retry_count', 5)
self.loop_delay = self.sd.getint('loop_delay', 30)
self.processing_time = self.sd.getint('processing_time', 60)
self.input_queue = self.sd.get_obj('input_queue')
self.output_queue = self.sd.get_obj('output_queue')
self.output_domain = self.sd.get_obj('output_domain')
if mimetype_files:
mimetypes.init(mimetype_files)
    def split_key(self, key):
if key.find(';') < 0:
t = (key, '')
else:
key, type = key.split(';')
label, mtype = type.split('=')
t = (key, mtype)
return t
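    # Hedged examples (added for illustration) of the "key;type=mimetype"
    # convention split_key parses:
    #   service.split_key('photo.jpg;type=image/jpeg') -> ('photo.jpg', 'image/jpeg')
    #   service.split_key('photo.jpg') -> ('photo.jpg', '')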
def read_message(self):
boto.log.info('read_message')
message = self.input_queue.read(self.processing_time)
if message:
boto.log.info(message.get_body())
key = 'Service-Read'
message[key] = get_ts()
return message
# retrieve the source file from S3
def get_file(self, message):
bucket_name = message['Bucket']
key_name = message['InputKey']
file_name = os.path.join(self.working_dir, message.get('OriginalFileName', 'in_file'))
boto.log.info('get_file: %s/%s to %s' % (bucket_name, key_name, file_name))
bucket = boto.lookup('s3', bucket_name)
key = bucket.new_key(key_name)
key.get_contents_to_filename(os.path.join(self.working_dir, file_name))
return file_name
# process source file, return list of output files
def process_file(self, in_file_name, msg):
return []
# store result file in S3
def put_file(self, bucket_name, file_path, key_name=None):
boto.log.info('putting file %s as %s.%s' % (file_path, bucket_name, key_name))
bucket = boto.lookup('s3', bucket_name)
key = bucket.new_key(key_name)
key.set_contents_from_filename(file_path)
return key
def save_results(self, results, input_message, output_message):
output_keys = []
for file, type in results:
if 'OutputBucket' in input_message:
output_bucket = input_message['OutputBucket']
else:
output_bucket = input_message['Bucket']
key_name = os.path.split(file)[1]
key = self.put_file(output_bucket, file, key_name)
output_keys.append('%s;type=%s' % (key.name, type))
output_message['OutputKey'] = ','.join(output_keys)
# write message to each output queue
def write_message(self, message):
message['Service-Write'] = get_ts()
message['Server'] = self.name
if 'HOSTNAME' in os.environ:
message['Host'] = os.environ['HOSTNAME']
else:
message['Host'] = 'unknown'
message['Instance-ID'] = self.instance_id
if self.output_queue:
boto.log.info('Writing message to SQS queue: %s' % self.output_queue.id)
self.output_queue.write(message)
if self.output_domain:
boto.log.info('Writing message to SDB domain: %s' % self.output_domain.name)
item_name = '/'.join([message['Service-Write'], message['Bucket'], message['InputKey']])
self.output_domain.put_attributes(item_name, message)
# delete message from input queue
def delete_message(self, message):
boto.log.info('deleting message from %s' % self.input_queue.id)
self.input_queue.delete_message(message)
# to clean up any files, etc. after each iteration
def cleanup(self):
pass
def shutdown(self):
on_completion = self.sd.get('on_completion', 'shutdown')
if on_completion == 'shutdown':
if self.instance_id:
time.sleep(60)
c = boto.connect_ec2()
c.terminate_instances([self.instance_id])
def main(self, notify=False):
self.notify('Service: %s Starting' % self.name)
empty_reads = 0
while self.retry_count < 0 or empty_reads < self.retry_count:
try:
input_message = self.read_message()
if input_message:
empty_reads = 0
output_message = ServiceMessage(None, input_message.get_body())
input_file = self.get_file(input_message)
results = self.process_file(input_file, output_message)
self.save_results(results, input_message, output_message)
self.write_message(output_message)
self.delete_message(input_message)
self.cleanup()
else:
empty_reads += 1
time.sleep(self.loop_delay)
except Exception:
boto.log.exception('Service Failed')
empty_reads += 1
self.notify('Service: %s Shutting Down' % self.name)
self.shutdown()
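# A minimal sketch (assumption, not part of boto): a concrete service
# overrides process_file() and returns (file_path, mimetype) tuples that
# save_results() uploads. The config file name is hypothetical.
# class UpperCaseService(Service):
#     def process_file(self, in_file_name, msg):
#         out_file_name = in_file_name + '.out'
#         with open(in_file_name) as fin, open(out_file_name, 'w') as fout:
#             fout.write(fin.read().upper())
#         return [(out_file_name, 'text/plain')]
# UpperCaseService(config_file='service.cfg').main()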
| bsd-3-clause |
markdryan/dleyna-renderer | test/dbus/cap.py | 4 | 8713 | #!/usr/bin/python
# cap
#
# Copyright (C) 2012 Intel Corporation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Mark Ryan <[email protected]>
#
from gi.repository import Gtk, Gdk, GdkPixbuf
import cairo
import dbus
import dbus.service
import dbus.mainloop.glib
import tempfile
class Renderer:
def __init__(self, path):
bus = dbus.SessionBus()
obj = bus.get_object('com.intel.dleyna-renderer', path)
self.__propsIF = dbus.Interface(obj, 'org.freedesktop.DBus.Properties')
self.__hostIF = dbus.Interface(obj,
'com.intel.dLeynaRenderer.PushHost')
self.__playerIF = dbus.Interface(obj,
'org.mpris.MediaPlayer2.Player')
def get_prop(self, prop_name, iface = ""):
return self.__propsIF.Get(iface, prop_name)
def push_file(self, fname):
try:
self.__hostIF.RemoveFile(fname)
except:
pass
self.__playerIF.Stop()
uri = self.__hostIF.HostFile(fname)
self.__playerIF.OpenUri(uri)
self.__playerIF.Play()
class Renderers:
def __init__(self, cb):
bus=dbus.SessionBus()
obj = bus.get_object('com.intel.dleyna-renderer',
'/com/intel/dLeynaRenderer')
self.__manager = dbus.Interface(obj,
'com.intel.dLeynaRenderer.Manager')
self.__cb = cb
self.__manager.connect_to_signal("LostServer", self.__servers_changed)
self.__manager.connect_to_signal("FoundServer", self.__servers_changed)
def __servers_changed(self, server):
self.__cb()
def get_renderers(self):
retval = []
for path in self.__manager.GetServers():
retval.append((path, Renderer(path)))
return retval
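# Hedged usage sketch: push a local PNG to the first renderer found.
# Requires a running dleyna-renderer service on the session bus; the
# file path is illustrative.
# renderers = Renderers(lambda: None).get_renderers()
# if renderers:
#     path, renderer = renderers[0]
#     print renderer.get_prop('Identity')
#     renderer.push_file('/tmp/example.png')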
class UI:
def delete_event(self, widget, event, data=None):
return False
def destroy(self, widget, data=None):
Gtk.main_quit()
def __create_renderers_store(self):
servers_store = Gtk.ListStore(str, str)
for server in self.__Renderers.get_renderers():
servers_store.append([server[0], server[1].get_prop("Identity")])
return servers_store
def __reset_renderers(self):
print "Renderers Changed"
entry = self.__combo.get_child()
servers_store = self.__create_renderers_store()
self.__combo.set_model(servers_store)
if len(servers_store) > 0:
self.__combo.set_active(0)
else:
entry.set_text("")
def draw_rect(self, widget, x, y):
if self.__pixmap != None:
ctx = cairo.Context(self.__pixmap)
ctx.set_source_rgb(0, 0, 0)
ctx.rectangle(x -3, y -3, 6, 6)
ctx.fill()
widget.queue_draw_area(x -3, y -3, 6, 6)
def __mouse_button_pressed_cb(self, widget, event):
self.draw_rect(widget, event.x, event.y)
return True
def __mouse_moved_cb(self, widget, event):
if event.state & Gdk.ModifierType.BUTTON1_MASK:
self.draw_rect(widget, event.x, event.y)
event.request_motions()
return True
def __draw_cb(self, da, ctx):
if self.__pixmap:
ctx.set_source_surface(self.__pixmap, 0, 0)
ctx.rectangle(0, 0, da.get_allocated_width(),
da.get_allocated_height())
ctx.fill()
@staticmethod
def __blank_pixmap(width, height):
new_pixmap = cairo.ImageSurface(cairo.FORMAT_RGB24, width, height)
ctx = cairo.Context(new_pixmap)
ctx.set_source_rgb(0xff, 0xff, 0xff)
ctx.rectangle(0, 0, width, height)
ctx.fill()
return (new_pixmap, ctx)
def __configured_cb(self, widget, event):
allocation = widget.get_allocation()
width = allocation.width
height = allocation.height
new_pixmap, ctx = UI.__blank_pixmap(width, height)
if self.__pixmap:
old_width = self.__pixmap.get_width()
old_height = self.__pixmap.get_height()
dest_x = (width - old_width) / 2
dest_y = (height - old_height) / 2
ctx.set_source_surface(self.__pixmap, dest_x, dest_y)
ctx.rectangle(0, 0, width, height)
ctx.fill()
self.__pixmap = new_pixmap
return True
def push_cb(self, button):
tree_iter = self.__combo.get_active_iter()
if tree_iter != None:
self.__pixmap.write_to_png(self.__tmp_file)
model = self.__combo.get_model()
ren = Renderer(model[tree_iter][0])
ren.push_file(self.__tmp_file)
def pick_cb(self, button):
dialog = Gtk.FileChooserDialog("Please choose a file", self.__window,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN,
Gtk.ResponseType.OK))
response = dialog.run()
if response == Gtk.ResponseType.OK:
print "Open clicked"
pick_file = dialog.get_filename()
tree_iter = self.__combo.get_active_iter()
if tree_iter != None:
model = self.__combo.get_model()
ren = Renderer(model[tree_iter][0])
dialog.destroy()
ren.push_file(pick_file)
elif response == Gtk.ResponseType.CANCEL:
print "Cancel clicked"
dialog.destroy()
def clear_cb(self, button):
allocation = self.__area.get_allocation()
self.__pixmap, ctx = UI.__blank_pixmap(allocation.width,
allocation.height)
self.__area.queue_draw_area(0,0, allocation.width, allocation.height)
def __init__(self):
self.__Renderers = Renderers(self.__reset_renderers)
self.__tmp_file = tempfile.mktemp(".png")
self.__pixmap = None
window = Gtk.Window()
window.set_default_size(640, 480)
window.set_title("Create and Push!")
container = Gtk.VBox(False, 0)
area = Gtk.DrawingArea()
area.set_events(Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.POINTER_MOTION_HINT_MASK)
area.connect("button_press_event", self.__mouse_button_pressed_cb)
area.connect("motion_notify_event", self.__mouse_moved_cb)
area.connect("configure-event", self.__configured_cb)
area.connect("draw", self.__draw_cb)
container.pack_start(area, True, True, 4);
button_bar = Gtk.HBox(False, 0)
pick_button = Gtk.Button("Pick & Push");
pick_button.connect("clicked", self.pick_cb)
push_button = Gtk.Button("Push");
push_button.connect("clicked", self.push_cb)
clear_button = Gtk.Button("Clear");
clear_button.connect("clicked", self.clear_cb)
servers_store = self.__create_renderers_store()
self.__combo = Gtk.ComboBox.new_with_model_and_entry(servers_store)
self.__combo.set_entry_text_column(1)
if len(servers_store) > 0:
self.__combo.set_active(0)
self.__combo.get_child().set_property("editable", False)
button_bar.pack_start(pick_button, True, True, 4)
button_bar.pack_start(push_button, True, True, 4)
button_bar.pack_start(clear_button, True, True, 4)
button_bar.pack_start(self.__combo, True, True, 4)
container.pack_start(button_bar, False, False, 4);
window.add(container)
window.show_all()
window.connect("delete_event", self.delete_event)
window.connect("destroy", self.destroy)
self.__window = window
self.__area = area
if __name__ == "__main__":
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
ui = UI()
Gtk.main()
| lgpl-2.1 |
chauhanhardik/populo | common/lib/xmodule/xmodule/modulestore/tests/test_libraries.py | 157 | 7393 | # -*- coding: utf-8 -*-
"""
Basic unit tests related to content libraries.
Higher-level tests are in `cms/djangoapps/contentstore`.
"""
import ddt
from bson.objectid import ObjectId
from opaque_keys.edx.locator import LibraryLocator
from xmodule.modulestore.exceptions import DuplicateCourseError
from xmodule.modulestore.tests.factories import LibraryFactory, ItemFactory, check_mongo_calls
from xmodule.modulestore.tests.utils import MixedSplitTestCase
@ddt.ddt
class TestLibraries(MixedSplitTestCase):
"""
Test for libraries.
Mostly tests code found throughout split mongo, but also tests library_root_xblock.py
"""
def test_create_library(self):
"""
Test that we can create a library, and see how many mongo calls it uses to do so.
Expected mongo calls, in order:
find_one({'org': '...', 'run': 'library', 'course': '...'})
insert(definition: {'block_type': 'library', 'fields': {}})
insert_structure(bulk)
insert_course_index(bulk)
get_course_index(bulk)
"""
with check_mongo_calls(2, 3):
LibraryFactory.create(modulestore=self.store)
def test_duplicate_library(self):
"""
Make sure we cannot create duplicate libraries
"""
org, lib_code = ('DuplicateX', "DUP")
LibraryFactory.create(org=org, library=lib_code, modulestore=self.store)
with self.assertRaises(DuplicateCourseError):
LibraryFactory.create(org=org, library=lib_code, modulestore=self.store)
@ddt.data(
"This is a test library!",
u"Ωμέγα Βιβλιοθήκη",
)
def test_str_repr(self, name):
"""
Test __unicode__() and __str__() methods of libraries
"""
library = LibraryFactory.create(metadata={"display_name": name}, modulestore=self.store)
self.assertIn(name, unicode(library))
if not isinstance(name, unicode):
self.assertIn(name, str(library))
def test_display_with_default_methods(self):
"""
Check that the display_x_with_default methods have been implemented, for
compatibility with courses.
"""
org = 'TestOrgX'
lib_code = 'LC101'
library = LibraryFactory.create(org=org, library=lib_code, modulestore=self.store)
self.assertEqual(library.display_org_with_default, org)
self.assertEqual(library.display_number_with_default, lib_code)
def test_block_with_children(self):
"""
Test that blocks used from a library can have children.
"""
library = LibraryFactory.create(modulestore=self.store)
# In the library, create a vertical block with a child:
vert_block = ItemFactory.create(
category="vertical",
parent_location=library.location,
user_id=self.user_id,
publish_item=False,
modulestore=self.store,
)
child_block = ItemFactory.create(
category="html",
parent_location=vert_block.location,
user_id=self.user_id,
publish_item=False,
metadata={"data": "Hello world", },
modulestore=self.store,
)
self.assertEqual(child_block.parent.replace(version_guid=None, branch=None), vert_block.location)
def test_update_item(self):
"""
Test that update_item works for a block in a library
"""
library = LibraryFactory.create(modulestore=self.store)
block = ItemFactory.create(
category="html",
parent_location=library.location,
user_id=self.user_id,
publish_item=False,
metadata={"data": "Hello world", },
modulestore=self.store,
)
block_key = block.location
block.data = "NEW"
old_version = self.store.get_item(block_key, remove_version=False, remove_branch=False).location.version_guid
self.store.update_item(block, self.user_id)
# Reload block from the modulestore
block = self.store.get_item(block_key)
self.assertEqual(block.data, "NEW")
self.assertEqual(block.location, block_key)
new_version = self.store.get_item(block_key, remove_version=False, remove_branch=False).location.version_guid
self.assertNotEqual(old_version, new_version)
def test_delete_item(self):
"""
Test to make sure delete_item() works on blocks in a library
"""
library = LibraryFactory.create(modulestore=self.store)
lib_key = library.location.library_key
block = ItemFactory.create(
category="html",
parent_location=library.location,
user_id=self.user_id,
publish_item=False,
modulestore=self.store,
)
library = self.store.get_library(lib_key)
self.assertEqual(len(library.children), 1)
self.store.delete_item(block.location, self.user_id)
library = self.store.get_library(lib_key)
self.assertEqual(len(library.children), 0)
def test_get_library_non_existent(self):
""" Test get_library() with non-existent key """
result = self.store.get_library(LibraryLocator("non", "existent"))
self.assertEqual(result, None)
def test_get_libraries(self):
""" Test get_libraries() """
libraries = [LibraryFactory.create(modulestore=self.store) for _ in range(3)]
lib_dict = dict([(lib.location.library_key, lib) for lib in libraries])
lib_list = self.store.get_libraries()
self.assertEqual(len(lib_list), len(libraries))
for lib in lib_list:
self.assertIn(lib.location.library_key, lib_dict)
def test_strip(self):
"""
Test that library keys coming out of MixedModuleStore are stripped of
branch and version info by default.
"""
# Create a library
lib_key = LibraryFactory.create(modulestore=self.store).location.library_key
# Re-load the library from the modulestore, explicitly including version information:
lib = self.store.get_library(lib_key)
self.assertEqual(lib.location.version_guid, None)
self.assertEqual(lib.location.branch, None)
self.assertEqual(lib.location.library_key.version_guid, None)
self.assertEqual(lib.location.library_key.branch, None)
def test_get_lib_version(self):
"""
Test that we can get version data about a library from get_library()
"""
# Create a library
lib_key = LibraryFactory.create(modulestore=self.store).location.library_key
# Re-load the library from the modulestore, explicitly including version information:
lib = self.store.get_library(lib_key, remove_version=False, remove_branch=False)
version = lib.location.library_key.version_guid
self.assertIsInstance(version, ObjectId)
def test_xblock_in_lib_have_published_version_returns_false(self):
library = LibraryFactory.create(modulestore=self.store)
block = ItemFactory.create(
category="html",
parent_location=library.location,
user_id=self.user_id,
publish_item=False,
modulestore=self.store,
)
self.assertFalse(self.store.has_published_version(block))
| agpl-3.0 |
huggingface/transformers | examples/tensorflow/benchmarking/run_benchmark_tf.py | 2 | 1915 | #!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Benchmarking the library on inference and training in TensorFlow"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
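# Hedged usage sketch; the flag names below are assumptions based on
# TensorFlowBenchmarkArguments and should be verified with --help:
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128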
def main():
parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
begin_error_msg = " ".join(str(e).split(" ")[:-1])
full_error_msg = ""
depreciated_args = eval(str(e).split(" ")[-1])
wrong_args = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:])
else:
wrong_args.append(arg)
if len(wrong_args) > 0:
full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
| apache-2.0 |
vineodd/PIMSim | GEM5Simulation/gem5/ext/pybind11/tests/test_methods_and_attributes.py | 13 | 17017 | import pytest
from pybind11_tests import methods_and_attributes as m
from pybind11_tests import ConstructorStats
def test_methods_and_attributes():
instance1 = m.ExampleMandA()
instance2 = m.ExampleMandA(32)
instance1.add1(instance2)
instance1.add2(instance2)
instance1.add3(instance2)
instance1.add4(instance2)
instance1.add5(instance2)
instance1.add6(32)
instance1.add7(32)
instance1.add8(32)
instance1.add9(32)
instance1.add10(32)
assert str(instance1) == "ExampleMandA[value=320]"
assert str(instance2) == "ExampleMandA[value=32]"
assert str(instance1.self1()) == "ExampleMandA[value=320]"
assert str(instance1.self2()) == "ExampleMandA[value=320]"
assert str(instance1.self3()) == "ExampleMandA[value=320]"
assert str(instance1.self4()) == "ExampleMandA[value=320]"
assert str(instance1.self5()) == "ExampleMandA[value=320]"
assert instance1.internal1() == 320
assert instance1.internal2() == 320
assert instance1.internal3() == 320
assert instance1.internal4() == 320
assert instance1.internal5() == 320
assert instance1.overloaded() == "()"
assert instance1.overloaded(0) == "(int)"
assert instance1.overloaded(1, 1.0) == "(int, float)"
assert instance1.overloaded(2.0, 2) == "(float, int)"
assert instance1.overloaded(3, 3) == "(int, int)"
assert instance1.overloaded(4., 4.) == "(float, float)"
assert instance1.overloaded_const(-3) == "(int) const"
assert instance1.overloaded_const(5, 5.0) == "(int, float) const"
assert instance1.overloaded_const(6.0, 6) == "(float, int) const"
assert instance1.overloaded_const(7, 7) == "(int, int) const"
assert instance1.overloaded_const(8., 8.) == "(float, float) const"
assert instance1.overloaded_float(1, 1) == "(float, float)"
assert instance1.overloaded_float(1, 1.) == "(float, float)"
assert instance1.overloaded_float(1., 1) == "(float, float)"
assert instance1.overloaded_float(1., 1.) == "(float, float)"
assert instance1.value == 320
instance1.value = 100
assert str(instance1) == "ExampleMandA[value=100]"
cstats = ConstructorStats.get(m.ExampleMandA)
assert cstats.alive() == 2
del instance1, instance2
assert cstats.alive() == 0
assert cstats.values() == ["32"]
assert cstats.default_constructions == 1
assert cstats.copy_constructions == 3
assert cstats.move_constructions >= 1
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
def test_copy_method():
"""Issue #443: calling copied methods fails in Python 3"""
m.ExampleMandA.add2c = m.ExampleMandA.add2
m.ExampleMandA.add2d = m.ExampleMandA.add2b
a = m.ExampleMandA(123)
assert a.value == 123
a.add2(m.ExampleMandA(-100))
assert a.value == 23
a.add2b(m.ExampleMandA(20))
assert a.value == 43
a.add2c(m.ExampleMandA(6))
assert a.value == 49
a.add2d(m.ExampleMandA(-7))
assert a.value == 42
def test_properties():
instance = m.TestProperties()
assert instance.def_readonly == 1
with pytest.raises(AttributeError):
instance.def_readonly = 2
instance.def_readwrite = 2
assert instance.def_readwrite == 2
assert instance.def_property_readonly == 2
with pytest.raises(AttributeError):
instance.def_property_readonly = 3
instance.def_property = 3
assert instance.def_property == 3
def test_static_properties():
assert m.TestProperties.def_readonly_static == 1
with pytest.raises(AttributeError) as excinfo:
m.TestProperties.def_readonly_static = 2
assert "can't set attribute" in str(excinfo)
m.TestProperties.def_readwrite_static = 2
assert m.TestProperties.def_readwrite_static == 2
assert m.TestProperties.def_property_readonly_static == 2
with pytest.raises(AttributeError) as excinfo:
m.TestProperties.def_property_readonly_static = 3
assert "can't set attribute" in str(excinfo)
m.TestProperties.def_property_static = 3
assert m.TestProperties.def_property_static == 3
# Static property read and write via instance
instance = m.TestProperties()
m.TestProperties.def_readwrite_static = 0
assert m.TestProperties.def_readwrite_static == 0
assert instance.def_readwrite_static == 0
instance.def_readwrite_static = 2
assert m.TestProperties.def_readwrite_static == 2
assert instance.def_readwrite_static == 2
# It should be possible to override properties in derived classes
assert m.TestPropertiesOverride().def_readonly == 99
assert m.TestPropertiesOverride.def_readonly_static == 99
def test_static_cls():
"""Static property getter and setters expect the type object as the their only argument"""
instance = m.TestProperties()
assert m.TestProperties.static_cls is m.TestProperties
assert instance.static_cls is m.TestProperties
def check_self(self):
assert self is m.TestProperties
m.TestProperties.static_cls = check_self
instance.static_cls = check_self
def test_metaclass_override():
"""Overriding pybind11's default metaclass changes the behavior of `static_property`"""
assert type(m.ExampleMandA).__name__ == "pybind11_type"
assert type(m.MetaclassOverride).__name__ == "type"
assert m.MetaclassOverride.readonly == 1
assert type(m.MetaclassOverride.__dict__["readonly"]).__name__ == "pybind11_static_property"
# Regular `type` replaces the property instead of calling `__set__()`
m.MetaclassOverride.readonly = 2
assert m.MetaclassOverride.readonly == 2
assert isinstance(m.MetaclassOverride.__dict__["readonly"], int)
def test_no_mixed_overloads():
from pybind11_tests import debug_enabled
with pytest.raises(RuntimeError) as excinfo:
m.ExampleMandA.add_mixed_overloads1()
assert (str(excinfo.value) ==
"overloading a method with both static and instance methods is not supported; " +
("compile in debug mode for more details" if not debug_enabled else
"error while attempting to bind static method ExampleMandA.overload_mixed1"
"(arg0: float) -> str")
)
with pytest.raises(RuntimeError) as excinfo:
m.ExampleMandA.add_mixed_overloads2()
assert (str(excinfo.value) ==
"overloading a method with both static and instance methods is not supported; " +
("compile in debug mode for more details" if not debug_enabled else
"error while attempting to bind instance method ExampleMandA.overload_mixed2"
"(self: pybind11_tests.methods_and_attributes.ExampleMandA, arg0: int, arg1: int)"
" -> str")
)
@pytest.mark.parametrize("access", ["ro", "rw", "static_ro", "static_rw"])
def test_property_return_value_policies(access):
if not access.startswith("static"):
obj = m.TestPropRVP()
else:
obj = m.TestPropRVP
ref = getattr(obj, access + "_ref")
assert ref.value == 1
ref.value = 2
assert getattr(obj, access + "_ref").value == 2
ref.value = 1 # restore original value for static properties
copy = getattr(obj, access + "_copy")
assert copy.value == 1
copy.value = 2
assert getattr(obj, access + "_copy").value == 1
copy = getattr(obj, access + "_func")
assert copy.value == 1
copy.value = 2
assert getattr(obj, access + "_func").value == 1
def test_property_rvalue_policy():
"""When returning an rvalue, the return value policy is automatically changed from
`reference(_internal)` to `move`. The following would not work otherwise."""
instance = m.TestPropRVP()
o = instance.rvalue
assert o.value == 1
os = m.TestPropRVP.static_rvalue
assert os.value == 1
# https://bitbucket.org/pypy/pypy/issues/2447
@pytest.unsupported_on_pypy
def test_dynamic_attributes():
instance = m.DynamicClass()
assert not hasattr(instance, "foo")
assert "foo" not in dir(instance)
# Dynamically add attribute
instance.foo = 42
assert hasattr(instance, "foo")
assert instance.foo == 42
assert "foo" in dir(instance)
# __dict__ should be accessible and replaceable
assert "foo" in instance.__dict__
instance.__dict__ = {"bar": True}
assert not hasattr(instance, "foo")
assert hasattr(instance, "bar")
with pytest.raises(TypeError) as excinfo:
instance.__dict__ = []
assert str(excinfo.value) == "__dict__ must be set to a dictionary, not a 'list'"
cstats = ConstructorStats.get(m.DynamicClass)
assert cstats.alive() == 1
del instance
assert cstats.alive() == 0
# Derived classes should work as well
class PythonDerivedDynamicClass(m.DynamicClass):
pass
for cls in m.CppDerivedDynamicClass, PythonDerivedDynamicClass:
derived = cls()
derived.foobar = 100
assert derived.foobar == 100
assert cstats.alive() == 1
del derived
assert cstats.alive() == 0
# https://bitbucket.org/pypy/pypy/issues/2447
@pytest.unsupported_on_pypy
def test_cyclic_gc():
# One object references itself
instance = m.DynamicClass()
instance.circular_reference = instance
cstats = ConstructorStats.get(m.DynamicClass)
assert cstats.alive() == 1
del instance
assert cstats.alive() == 0
# Two object reference each other
i1 = m.DynamicClass()
i2 = m.DynamicClass()
i1.cycle = i2
i2.cycle = i1
assert cstats.alive() == 2
del i1, i2
assert cstats.alive() == 0
def test_noconvert_args(msg):
a = m.ArgInspector()
assert msg(a.f("hi")) == """
loading ArgInspector1 argument WITH conversion allowed. Argument value = hi
"""
assert msg(a.g("this is a", "this is b")) == """
loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
13
loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2)
""" # noqa: E501 line too long
assert msg(a.g("this is a", "this is b", 42)) == """
loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
42
loading ArgInspector2 argument WITH conversion allowed. Argument value = (default arg inspector 2)
""" # noqa: E501 line too long
assert msg(a.g("this is a", "this is b", 42, "this is d")) == """
loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = this is a
loading ArgInspector1 argument WITH conversion allowed. Argument value = this is b
42
loading ArgInspector2 argument WITH conversion allowed. Argument value = this is d
"""
assert (a.h("arg 1") ==
"loading ArgInspector2 argument WITHOUT conversion allowed. Argument value = arg 1")
assert msg(m.arg_inspect_func("A1", "A2")) == """
loading ArgInspector2 argument WITH conversion allowed. Argument value = A1
loading ArgInspector1 argument WITHOUT conversion allowed. Argument value = A2
"""
assert m.floats_preferred(4) == 2.0
assert m.floats_only(4.0) == 2.0
with pytest.raises(TypeError) as excinfo:
m.floats_only(4)
assert msg(excinfo.value) == """
floats_only(): incompatible function arguments. The following argument types are supported:
1. (f: float) -> float
Invoked with: 4
"""
assert m.ints_preferred(4) == 2
assert m.ints_preferred(True) == 0
with pytest.raises(TypeError) as excinfo:
m.ints_preferred(4.0)
assert msg(excinfo.value) == """
ints_preferred(): incompatible function arguments. The following argument types are supported:
1. (i: int) -> int
Invoked with: 4.0
""" # noqa: E501 line too long
assert m.ints_only(4) == 2
with pytest.raises(TypeError) as excinfo:
m.ints_only(4.0)
assert msg(excinfo.value) == """
ints_only(): incompatible function arguments. The following argument types are supported:
1. (i: int) -> int
Invoked with: 4.0
"""
def test_bad_arg_default(msg):
from pybind11_tests import debug_enabled
with pytest.raises(RuntimeError) as excinfo:
m.bad_arg_def_named()
assert msg(excinfo.value) == (
"arg(): could not convert default argument 'a: UnregisteredType' in function "
"'should_fail' into a Python object (type not registered yet?)"
if debug_enabled else
"arg(): could not convert default argument into a Python object (type not registered "
"yet?). Compile in debug mode for more information."
)
with pytest.raises(RuntimeError) as excinfo:
m.bad_arg_def_unnamed()
assert msg(excinfo.value) == (
"arg(): could not convert default argument 'UnregisteredType' in function "
"'should_fail' into a Python object (type not registered yet?)"
if debug_enabled else
"arg(): could not convert default argument into a Python object (type not registered "
"yet?). Compile in debug mode for more information."
)
def test_accepts_none(msg):
a = m.NoneTester()
assert m.no_none1(a) == 42
assert m.no_none2(a) == 42
assert m.no_none3(a) == 42
assert m.no_none4(a) == 42
assert m.no_none5(a) == 42
assert m.ok_none1(a) == 42
assert m.ok_none2(a) == 42
assert m.ok_none3(a) == 42
assert m.ok_none4(a) == 42
assert m.ok_none5(a) == 42
with pytest.raises(TypeError) as excinfo:
m.no_none1(None)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.no_none2(None)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.no_none3(None)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.no_none4(None)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.no_none5(None)
assert "incompatible function arguments" in str(excinfo.value)
# The first one still raises because you can't pass None as a lvalue reference arg:
with pytest.raises(TypeError) as excinfo:
assert m.ok_none1(None) == -1
assert msg(excinfo.value) == """
ok_none1(): incompatible function arguments. The following argument types are supported:
1. (arg0: m.methods_and_attributes.NoneTester) -> int
Invoked with: None
"""
# The rest take the argument as pointer or holder, and accept None:
assert m.ok_none2(None) == -1
assert m.ok_none3(None) == -1
assert m.ok_none4(None) == -1
assert m.ok_none5(None) == -1
def test_str_issue(msg):
"""#283: __str__ called on uninitialized instance when constructor arguments invalid"""
assert str(m.StrIssue(3)) == "StrIssue[3]"
with pytest.raises(TypeError) as excinfo:
str(m.StrIssue("no", "such", "constructor"))
assert msg(excinfo.value) == """
__init__(): incompatible constructor arguments. The following argument types are supported:
1. m.methods_and_attributes.StrIssue(arg0: int)
2. m.methods_and_attributes.StrIssue()
Invoked with: 'no', 'such', 'constructor'
"""
def test_unregistered_base_implementations():
a = m.RegisteredDerived()
a.do_nothing()
assert a.rw_value == 42
assert a.ro_value == 1.25
a.rw_value += 5
assert a.sum() == 48.25
a.increase_value()
assert a.rw_value == 48
assert a.ro_value == 1.5
assert a.sum() == 49.5
assert a.rw_value_prop == 48
a.rw_value_prop += 1
assert a.rw_value_prop == 49
a.increase_value()
assert a.ro_value_prop == 1.75
def test_custom_caster_destruction():
"""Tests that returning a pointer to a type that gets converted with a custom type caster gets
destroyed when the function has py::return_value_policy::take_ownership policy applied."""
cstats = m.destruction_tester_cstats()
# This one *doesn't* have take_ownership: the pointer should be used but not destroyed:
z = m.custom_caster_no_destroy()
assert cstats.alive() == 1 and cstats.default_constructions == 1
assert z
# take_ownership applied: this constructs a new object, casts it, then destroys it:
z = m.custom_caster_destroy()
assert z
assert cstats.default_constructions == 2
# Same, but with a const pointer return (which should *not* inhibit destruction):
z = m.custom_caster_destroy_const()
assert z
assert cstats.default_constructions == 3
# Make sure we still only have the original object (from ..._no_destroy()) alive:
assert cstats.alive() == 1
| gpl-3.0 |
junhuac/MQUIC | depot_tools/ENV/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/url.py | 375 | 5760 | from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
class Url(namedtuple('Url', url_attrs)):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
    __slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
query=None, fragment=None):
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:[email protected]:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = ''
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + '://'
if auth is not None:
url += auth + '@'
if host is not None:
url += host
if port is not None:
url += ':' + str(port)
if path is not None:
url += path
if query is not None:
url += '?' + query
if fragment is not None:
url += '#' + fragment
return url
def __str__(self):
return self.url
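# Hedged note: since Url is a namedtuple, fields can be swapped immutably
# with _replace, e.g.
#   parse_url('http://example.com/a')._replace(scheme='https').url
# evaluates to 'https://example.com/a'.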
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
# Additionally, this implementations does silly things to be optimal
# on CPython.
if not url:
# Empty
return Url()
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError(url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
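# Hedged usage sketch of parse_url()/get_host():
#   u = parse_url('https://user:[email protected]:8080/a/b?x=1#frag')
#   (u.scheme, u.auth, u.host, u.port) -> ('https', 'user:pw', 'example.com', 8080)
#   u.request_uri -> '/a/b?x=1'
#   get_host('http://example.com/a') -> ('http', 'example.com', None)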
| mit |
TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/thirdparty/timeline/timeline.py | 1 | 16578 | #! /usr/bin/env python
import wx, wx.lib.newevent
TimeLineSelectionEvent, EVT_TIMELINE_SELECTED = wx.lib.newevent.NewEvent()
TimeLineActivationEvent, EVT_TIMELINE_ACTIVATED = wx.lib.newevent.NewEvent()
class HotMap(object):
''' Keep track of which node is where. '''
def __init__(self, parent=None):
self.parent = parent
self.nodes = []
self.rects = {}
self.children = {}
super(HotMap, self).__init__()
def append(self, node, rect):
self.nodes.append(node)
self.rects[node] = rect
self.children[node] = HotMap(node)
def __getitem__(self, node):
return self.children[node]
def findNodeAtPosition(self, position, parent=None):
''' Retrieve the node at the given position. '''
for node, rect in self.rects.items():
if rect.Contains(position):
return self[node].findNodeAtPosition(position, node)
return parent
def firstNode(self):
return self.nodes[0] if self.nodes else None
def lastNode(self, parent=None):
if self.nodes:
last = self.nodes[-1]
return self[last].lastNode(last)
else:
return parent
def findNode(self, target):
if target in self.nodes:
return self
for node in self.nodes:
result = self[node].findNode(target)
if result:
return result
return None
def nextChild(self, target):
index = self.nodes.index(target)
index = min(index+1, len(self.nodes)-1)
return self.nodes[index]
def previousChild(self, target):
index = self.nodes.index(target)
index = max(index-1, 0)
return self.nodes[index]
def firstChild(self, target):
children = self[target].nodes
if children:
return children[0]
else:
return target
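# Note (added): TimeLine rebuilds a fresh HotMap on every Draw() pass;
# findNodeAtPosition() then maps mouse coordinates to the deepest node whose
# rectangle contains the point, which drives selection and activation events.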
class TimeLine(wx.Panel):
def __init__(self, *args, **kwargs):
self.model = kwargs.pop('model', [])
self.padding = kwargs.pop('padding', 3)
self.adapter = kwargs.pop('adapter', DefaultAdapter())
self.selectedNode = None
self.backgroundColour = wx.WHITE
self._buffer = wx.EmptyBitmap(20, 20) # Have a default buffer ready
self.DEFAULT_PEN = wx.Pen(wx.BLACK, 1, wx.SOLID)
self.SELECTED_PEN = wx.Pen(wx.WHITE, 2, wx.SOLID)
kwargs['style'] = wx.TAB_TRAVERSAL|wx.NO_BORDER|wx.FULL_REPAINT_ON_RESIZE|wx.WANTS_CHARS
super(TimeLine, self).__init__(*args, **kwargs)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnSize )
self.Bind(wx.EVT_LEFT_UP, self.OnClickRelease)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnDoubleClick)
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.OnSize(None)
def SetBackgroundColour(self, colour):
self.backgroundColour = colour
def Refresh(self):
self.UpdateDrawing()
def OnPaint(self, event):
dc = wx.BufferedPaintDC(self, self._buffer)
def OnSize(self, event):
# The buffer is initialized in here, so that the buffer is always
# the same size as the Window.
width, height = self.GetClientSizeTuple()
if width <= 0 or height <= 0:
return
# Make new off-screen bitmap: this bitmap will always have the
# current drawing in it, so it can be used to save the image to
# a file, or whatever.
self._buffer = wx.EmptyBitmap(width, height)
self.UpdateDrawing()
def OnClickRelease(self, event):
event.Skip()
self.SetFocus()
point = event.GetPosition()
node = self.hot_map.findNodeAtPosition(point)
self.SetSelected(node, point)
def OnDoubleClick(self, event):
point = event.GetPosition()
node = self.hot_map.findNodeAtPosition(point)
if node:
wx.PostEvent(self, TimeLineActivationEvent(node=node, point=point))
def OnKeyUp(self, event):
event.Skip()
if not self.hot_map:
return
if event.KeyCode == wx.WXK_HOME:
self.SetSelected(self.hot_map.firstNode())
return
elif event.KeyCode == wx.WXK_END:
self.SetSelected(self.hot_map.lastNode())
return
if not self.selectedNode:
return
if event.KeyCode == wx.WXK_RETURN:
wx.PostEvent(self, TimeLineActivationEvent(node=self.selectedNode))
return
hot_map = self.hot_map.findNode(self.selectedNode)
if hot_map is None:
newSelection = self.hot_map.firstNode()
elif event.KeyCode == wx.WXK_DOWN:
newSelection = hot_map.nextChild(self.selectedNode)
elif event.KeyCode == wx.WXK_UP:
newSelection = hot_map.previousChild(self.selectedNode)
elif event.KeyCode == wx.WXK_RIGHT:
newSelection = hot_map.firstChild(self.selectedNode)
elif event.KeyCode == wx.WXK_LEFT and hot_map.parent:
newSelection = hot_map.parent
else:
newSelection = self.selectedNode
self.SetSelected(newSelection)
def GetSelected(self):
return self.selectedNode
def SetSelected(self, node, point=None):
''' Set the given node selected in the timeline widget '''
if node == self.selectedNode:
return
self.selectedNode = node
self.Refresh()
if node:
wx.PostEvent(self, TimeLineSelectionEvent(node=node, point=point))
def UpdateDrawing(self):
dc = wx.BufferedDC(wx.ClientDC(self), self._buffer)
self.Draw(dc)
def Draw(self, dc):
''' Draw the timeline on the device context. '''
self.hot_map = HotMap()
dc.BeginDrawing()
brush = wx.Brush(self.backgroundColour)
dc.SetBackground(brush)
dc.Clear()
dc.SetFont(self.FontForLabels(dc))
if self.model:
bounds = self.adapter.bounds(self.model)
self.min_start = float(min(bounds))
self.max_stop = float(max(bounds))
if self.max_stop - self.min_start < 100:
self.max_stop += 100
self.length = self.max_stop - self.min_start
self.width, self.height = dc.GetSize()
labelHeight = dc.GetTextExtent('ABC')[1] + 2 # Leave room for time labels
self.DrawParallelChildren(dc, self.model, labelHeight, self.height-labelHeight, self.hot_map)
self.DrawNow(dc)
dc.EndDrawing()
def FontForLabels(self, dc):
''' Return the default GUI font, scaled for printing if necessary. '''
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
scale = dc.GetPPI()[0] / wx.ScreenDC().GetPPI()[0]
font.SetPointSize(scale*font.GetPointSize())
return font
def DrawBox(self, dc, node, y, h, hot_map, isSequentialNode=False, depth=0):
if h < self.padding:
return
start, stop = self.adapter.start(node), self.adapter.stop(node)
if start is None:
start = self.min_start - 10
if stop is None:
stop = self.max_stop + 10
start, stop = min(start, stop), max(start, stop) # Sanitize input
x = self.scaleX(start) + 2*depth
w = self.scaleWidth(stop - start) - 4*depth
hot_map.append(node, (wx.Rect(int(x), int(y), int(w), int(h))))
self.DrawRectangle(dc, node, x, y, w, h, isSequentialNode, depth)
if not isSequentialNode:
self.DrawIconAndLabel(dc, node, x, y, w, h, depth)
seqHeight = min(dc.GetTextExtent('ABC')[1] + 2, h)
self.DrawSequentialChildren(dc, node, y+2, seqHeight-4, hot_map[node], depth+1)
self.DrawParallelChildren(dc, node, y+seqHeight, h-seqHeight, hot_map[node], depth+1)
def DrawRectangle(self, dc, node, x, y, w, h, isSequentialNode, depth):
dc = wx.GCDC(dc) if isSequentialNode else dc
dc.SetClippingRegion(x, y, w, h)
dc.SetBrush(self.brushForNode(node, isSequentialNode, depth))
dc.SetPen(self.penForNode(node, isSequentialNode, depth))
rounding = 0 if isSequentialNode and (h < self.padding * 4 or w < self.padding * 4) else self.padding * 2
dc.DrawRoundedRectangle(x, y, w, h, rounding)
dc.DestroyClippingRegion()
def DrawIconAndLabel(self, dc, node, x, y, w, h, depth):
''' Draw the icon, if any, and the label, if any, of the node. '''
# Make sure the Icon and Label are visible:
if x < 0:
w -= abs(x)
x = 0
dc.SetClippingRegion(x+1, y+1, w-2, h-2) # Don't draw outside the box
icon = self.adapter.icon(node, node==self.selectedNode)
if icon and h >= icon.GetHeight() and w >= icon.GetWidth():
iconWidth = icon.GetWidth() + 2
dc.DrawIcon(icon, x+2, y+2)
else:
iconWidth = 0
if h >= dc.GetTextExtent('ABC')[1]:
dc.SetFont(self.fontForNode(dc, node, depth))
dc.SetTextForeground(self.textForegroundForNode(node, depth))
dc.DrawText(self.adapter.label(node), x + iconWidth + 2, y+2)
dc.DestroyClippingRegion()
def DrawParallelChildren(self, dc, parent, y, h, hot_map, depth=0):
children = self.adapter.parallel_children(parent)
if not children:
return
childY = y
h -= len(children) # vertical space between children
recursiveChildrenList = [self.adapter.parallel_children(child, recursive=True) \
for child in children]
recursiveChildrenCounts = [len(recursiveChildren) for recursiveChildren in recursiveChildrenList]
recursiveChildHeight = h / float(len(children) + sum(recursiveChildrenCounts))
for child, numberOfRecursiveChildren in zip(children, recursiveChildrenCounts):
childHeight = recursiveChildHeight * (numberOfRecursiveChildren + 1)
if childHeight >= self.padding:
self.DrawBox(dc, child, childY, childHeight, hot_map, depth=depth)
childY += childHeight + 1
def DrawSequentialChildren(self, dc, parent, y, h, hot_map, depth=0):
for child in self.adapter.sequential_children(parent):
self.DrawBox(dc, child, y, h, hot_map, isSequentialNode=True, depth=depth)
def DrawNow(self, dc):
alpha_dc = wx.GCDC(dc)
alpha_dc.SetPen(wx.Pen(wx.Color(128, 200, 128, 128), width=3))
now = self.scaleX(self.adapter.now())
alpha_dc.DrawLine(now, 0, now, self.height)
label = self.adapter.nowlabel()
textWidth = alpha_dc.GetTextExtent(label)[0]
alpha_dc.DrawText(label, now - (textWidth / 2), 0)
def scaleX(self, x):
return self.scaleWidth(x - self.min_start)
def scaleWidth(self, width):
return (width / self.length) * self.width
def textForegroundForNode(self, node, depth=0):
''' Determine the text foreground color to use to display the label of
the given node '''
if node == self.selectedNode:
fg_color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT)
else:
fg_color = self.adapter.foreground_color(node, depth)
if not fg_color:
fg_color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOWTEXT)
return fg_color
def fontForNode(self, dc, node, depth=0):
''' Determine the font to use to display the label of the given node,
scaled for printing if necessary. '''
font = self.adapter.font(node, depth)
font = font if font else wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
scale = dc.GetPPI()[0] / wx.ScreenDC().GetPPI()[0]
font.SetPointSize(scale*font.GetPointSize())
return font
def brushForNode(self, node, isSequentialNode=False, depth=0):
''' Create brush to use to display the given node '''
if node == self.selectedNode:
color = wx.SystemSettings_GetColour(wx.SYS_COLOUR_HIGHLIGHT)
else:
color = self.adapter.background_color(node)
if color:
# The adapter returns a 3-tuple
color = wx.Color(*color)
else:
red = (depth * 10)%255
green = 255-((depth * 10)%255)
blue = 200
color = wx.Color(red, green, blue)
if isSequentialNode:
color.Set(color.Red(), color.Green(), color.Blue(), 128)
return wx.Brush(color)
def penForNode(self, node, isSequentialNode=False, depth=0):
''' Determine the pen to use to display the given node '''
pen = self.SELECTED_PEN if node == self.selectedNode else self.DEFAULT_PEN
#style = wx.DOT if isSequentialNode else wx.SOLID
#pen.SetStyle(style)
return pen
class DefaultAdapter(object):
def parallel_children(self, node, recursive=False):
children = node.parallel_children[:]
if recursive:
for child in node.parallel_children:
children.extend(self.parallel_children(child, True))
return children
def sequential_children(self, node):
return node.sequential_children
def children(self, node):
return self.parallel_children(node) + self.sequential_children(node)
def bounds(self, node):
times = [node.start, node.stop]
for child in self.children(node):
times.extend(self.bounds(child))
return min(times), max(times)
def start(self, node, recursive=False):
starts = [node.start]
if recursive:
starts.extend([self.start(child, True) \
for child in self.children(node)])
return float(min(starts))
def stop(self, node, recursive=False):
stops = [node.stop]
if recursive:
stops.extend([self.stop(child, True) \
for child in self.children(node)])
return float(max(stops))
def label(self, node):
return node.path
def background_color(self, node):
return None
def foreground_color(self, node, depth):
return None
    def icon(self, node, isSelected=False):
        return None
    def font(self, node, depth):
        return None
def now(self):
return 0
def nowlabel(self):
return 'Now'
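# Hedged sketch: custom adapters can subclass DefaultAdapter and override
# just the hooks they need; brushForNode() expects background_color() to
# return an RGB 3-tuple (or None for the depth-based default).
# class GreenAdapter(DefaultAdapter):
#     def background_color(self, node):
#         return (200, 255, 200)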
class TestApp(wx.App):
''' Basic application for holding the viewing Frame '''
def __init__(self, size):
self.size = size
super(TestApp, self).__init__(0)
def OnInit(self):
''' Initialise the application. '''
wx.InitAllImageHandlers()
self.frame = wx.Frame(None)
self.frame.CreateStatusBar()
model = self.get_model(self.size)
self.timeline = TimeLine(self.frame, model=model)
self.frame.Show(True)
return True
def get_model(self, size):
parallel_children, sequential_children = [], []
if size > 0:
parallel_children = [self.get_model(size-1) for i in range(size)]
sequential_children = [Node('Seq 1', 30+10*size, 40+10*size, [], []),
Node('Seq 2', 80-10*size, 90-10*size, [], [])]
return Node('Node %d'%size, 0+5*size, 100-5*size, parallel_children,
sequential_children)
class Node(object):
def __init__(self, path, start, stop, subnodes, events):
self.path = path
self.start = start
self.stop = stop
self.parallel_children = subnodes
self.sequential_children = events
def __repr__(self):
return '%s(%r, %r, %r, %r, %r)'%(self.__class__.__name__, self.path,
self.start, self.stop,
self.parallel_children,
self.sequential_children)
usage = 'timeline.py [size]'
def main():
"""Mainloop for the application"""
import sys
size = 3
if len(sys.argv) > 1:
if sys.argv[1] in ('-h', '--help'):
print usage
else:
try:
size = int(sys.argv[1])
except ValueError:
print usage
else:
app = TestApp(size)
app.MainLoop()
if __name__ == "__main__":
main()
| gpl-3.0 |
dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/EUtils/setup.py | 1 | 1941 |
import sys
from distutils.core import setup
try:
import EUtils
except ImportError:
import __init__ as EUtils
def _dict(**kwargs):
return kwargs
d = _dict(
name = "EUtils",
version = EUtils.__version__,
description = "Client interface to NCBI's EUtils/Entrez server",
author = "Andrew Dalke",
author_email = "[email protected]",
maintainer = "Dalke Scientific Software, LLC",
maintainer_email = "[email protected]",
url = "http://www.dalkescientific.com/EUtils/",
long_description = """\
EUtils is a client library for the Entrez databases at NCBI.
NCBI provides the EUtils web service so that software can query Entrez
directly, rather than going through the web interface and dealing with
the hassles of web scraping. For more information see
http://www.ncbi.nlm.nih.gov/entrez/query/static/eutils_help.html
This package provides two levels of interface. The lowest one offers a
programmatic interface to construct the query URL and make the
request. The higher level ones support history tracking and parsing
of query results. These greatly simplify working with the EUtils
server.
""",
package_dir = {"": ".."},
packages = ["EUtils", "EUtils.DTDs"],
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: Freely Distributable",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Bio-Informatics", # a '-'? !
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Internet",
],
)
if sys.version_info < (2,2,4):
del d["classifiers"]
if __name__ == "__main__":
setup(**d)
| apache-2.0 |
igabriel85/dmon-adp | adpformater/adpformater.py | 1 | 1615 | import pandas as pd
class DataFormatter():
def __init__(self, dataloc):
self.dataloc = dataloc
def aggJsonToCsv(self):
return "CSV file"
def expTimestamp(self):
return "Expand metric timestamp"
def window(self):
return "Window metrics"
def pivot(self):
return "Pivot values"
def addID(self):
return "Add new ID as index"
def removeID(self):
return "Remove selected column as index"
def renameHeader(self):
return "Rename headers"
def normalize(self):
return "Normalize data"
def denormalize(self):
return "Denormalize data"
input_table = pd.read_csv("metrics.csv")
for index, row in input_table.iterrows():
input_table = input_table.append([row]*9)
input_table = input_table.sort_values(['row ID'])
input_table = input_table.reset_index(drop=True)
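# Expand the timestamps: every source row was duplicated ten-fold above, so
# the loop below rewrites the seconds field of each copy with its row index,
# turning the copies into consecutive one-second samples.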
for index, rows in input_table.iterrows():
if int(index) > 59:
print "Index to big!"
    time = rows[0].split(", ", 1)  # in KNIME the timestamp is in the last column (row 55)
timeHour = time[1].split(":", 2)
timeHourSeconds = timeHour[2].split(".", 1)
timeHourSecondsDecimal = timeHour[2].split(".", 1)
timeHourSecondsDecimal[0] = str(index)
if len(timeHourSecondsDecimal[0]) == 1:
timeHourSecondsDecimal[0] = '0%s' %timeHourSecondsDecimal[0]
decimal = '.'.join(timeHourSecondsDecimal)
timeHour[2] = decimal
timenew = ':'.join(timeHour)
time[1] = timenew
finalString = ', '.join(time)
input_table.set_value(index, 'row ID', finalString)
input_table.to_csv('out.csv')
| apache-2.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/boto/dynamodb2/items.py | 32 | 14656 | from copy import deepcopy
class NEWVALUE(object):
# A marker for new data added.
pass
class Item(object):
"""
An object representing the item data within a DynamoDB table.
An item is largely schema-free, meaning it can contain any data. The only
limitation is that it must have data for the fields in the ``Table``'s
schema.
This object presents a dictionary-like interface for accessing/storing
data. It also tries to intelligently track how data has changed throughout
the life of the instance, to be as efficient as possible about updates.
Empty items, or items that have no data, are considered falsey.
"""
def __init__(self, table, data=None, loaded=False):
"""
Constructs an (unsaved) ``Item`` instance.
To persist the data in DynamoDB, you'll need to call the ``Item.save``
(or ``Item.partial_save``) on the instance.
Requires a ``table`` parameter, which should be a ``Table`` instance.
        This is required, as DynamoDB's API is focused around all operations
being table-level. It's also for persisting schema around many objects.
Optionally accepts a ``data`` parameter, which should be a dictionary
of the fields & values of the item. Alternatively, an ``Item`` instance
may be provided from which to extract the data.
Optionally accepts a ``loaded`` parameter, which should be a boolean.
``True`` if it was preexisting data loaded from DynamoDB, ``False`` if
it's new data from the user. Default is ``False``.
Example::
>>> users = Table('users')
>>> user = Item(users, data={
... 'username': 'johndoe',
... 'first_name': 'John',
            ...     'date_joined': 1248061592,
... })
# Change existing data.
>>> user['first_name'] = 'Johann'
# Add more data.
>>> user['last_name'] = 'Doe'
# Delete data.
>>> del user['date_joined']
# Iterate over all the data.
>>> for field, val in user.items():
... print "%s: %s" % (field, val)
username: johndoe
first_name: John
            date_joined: 1248061592
"""
self.table = table
self._loaded = loaded
self._orig_data = {}
self._data = data
self._dynamizer = table._dynamizer
if isinstance(self._data, Item):
self._data = self._data._data
if self._data is None:
self._data = {}
if self._loaded:
self._orig_data = deepcopy(self._data)
def __getitem__(self, key):
return self._data.get(key, None)
def __setitem__(self, key, value):
self._data[key] = value
def __delitem__(self, key):
if not key in self._data:
return
del self._data[key]
def keys(self):
return self._data.keys()
def values(self):
return self._data.values()
def items(self):
return self._data.items()
def get(self, key, default=None):
return self._data.get(key, default)
def __iter__(self):
for key in self._data:
yield self._data[key]
def __contains__(self, key):
return key in self._data
def __bool__(self):
return bool(self._data)
__nonzero__ = __bool__
def _determine_alterations(self):
"""
        Checks the ``_orig_data`` against the ``_data`` to determine what
changes to the data are present.
Returns a dictionary containing the keys ``adds``, ``changes`` &
``deletes``, containing the updated data.
"""
alterations = {
'adds': {},
'changes': {},
'deletes': [],
}
orig_keys = set(self._orig_data.keys())
data_keys = set(self._data.keys())
# Run through keys we know are in both for changes.
for key in orig_keys.intersection(data_keys):
if self._data[key] != self._orig_data[key]:
if self._is_storable(self._data[key]):
alterations['changes'][key] = self._data[key]
else:
alterations['deletes'].append(key)
# Run through additions.
for key in data_keys.difference(orig_keys):
if self._is_storable(self._data[key]):
alterations['adds'][key] = self._data[key]
# Run through deletions.
for key in orig_keys.difference(data_keys):
alterations['deletes'].append(key)
return alterations
def needs_save(self, data=None):
"""
Returns whether or not the data has changed on the ``Item``.
Optionally accepts a ``data`` argument, which accepts the output from
``self._determine_alterations()`` if you've already called it. Typically
unnecessary to do. Default is ``None``.
Example:
>>> user.needs_save()
False
>>> user['first_name'] = 'Johann'
>>> user.needs_save()
True
"""
if data is None:
data = self._determine_alterations()
needs_save = False
for kind in ['adds', 'changes', 'deletes']:
if len(data[kind]):
needs_save = True
break
return needs_save
def mark_clean(self):
"""
Marks an ``Item`` instance as no longer needing to be saved.
Example:
>>> user.needs_save()
False
>>> user['first_name'] = 'Johann'
>>> user.needs_save()
True
>>> user.mark_clean()
>>> user.needs_save()
False
"""
self._orig_data = deepcopy(self._data)
def mark_dirty(self):
"""
DEPRECATED: Marks an ``Item`` instance as needing to be saved.
This method is no longer necessary, as the state tracking on ``Item``
has been improved to automatically detect proper state.
"""
return
def load(self, data):
"""
This is only useful when being handed raw data from DynamoDB directly.
If you have a Python datastructure already, use the ``__init__`` or
manually set the data instead.
Largely internal, unless you know what you're doing or are trying to
mix the low-level & high-level APIs.
"""
self._data = {}
for field_name, field_value in data.get('Item', {}).items():
self[field_name] = self._dynamizer.decode(field_value)
self._loaded = True
self._orig_data = deepcopy(self._data)
def get_keys(self):
"""
Returns a Python-style dict of the keys/values.
Largely internal.
"""
key_fields = self.table.get_key_fields()
key_data = {}
for key in key_fields:
key_data[key] = self[key]
return key_data
def get_raw_keys(self):
"""
Returns a DynamoDB-style dict of the keys/values.
Largely internal.
"""
raw_key_data = {}
for key, value in self.get_keys().items():
raw_key_data[key] = self._dynamizer.encode(value)
return raw_key_data
def build_expects(self, fields=None):
"""
        Builds up a list of expectations to hand off to DynamoDB on save.
Largely internal.
"""
expects = {}
if fields is None:
fields = list(self._data.keys()) + list(self._orig_data.keys())
# Only uniques.
fields = set(fields)
for key in fields:
expects[key] = {
'Exists': True,
}
value = None
# Check for invalid keys.
if not key in self._orig_data and not key in self._data:
raise ValueError("Unknown key %s provided." % key)
# States:
# * New field (only in _data)
# * Unchanged field (in both _data & _orig_data, same data)
# * Modified field (in both _data & _orig_data, different data)
# * Deleted field (only in _orig_data)
orig_value = self._orig_data.get(key, NEWVALUE)
current_value = self._data.get(key, NEWVALUE)
if orig_value == current_value:
# Existing field unchanged.
value = current_value
else:
if key in self._data:
if not key in self._orig_data:
# New field.
expects[key]['Exists'] = False
else:
# Existing field modified.
value = orig_value
else:
# Existing field deleted.
value = orig_value
if value is not None:
expects[key]['Value'] = self._dynamizer.encode(value)
return expects
def _is_storable(self, value):
# We need to prevent ``None``, empty string & empty set from
        # heading to DDB, but allow false-y values like 0 & False to make it.
if not value:
if not value in (0, 0.0, False):
return False
return True
def prepare_full(self):
"""
Runs through all fields & encodes them to be handed off to DynamoDB
        as part of a ``save`` (``put_item``) call.
Largely internal.
"""
# This doesn't save on its own. Rather, we prepare the datastructure
# and hand-off to the table to handle creation/update.
final_data = {}
for key, value in self._data.items():
if not self._is_storable(value):
continue
final_data[key] = self._dynamizer.encode(value)
return final_data
def prepare_partial(self):
"""
Runs through **ONLY** the changed/deleted fields & encodes them to be
        handed off to DynamoDB as part of a ``partial_save`` (``update_item``)
call.
Largely internal.
"""
# This doesn't save on its own. Rather, we prepare the datastructure
# and hand-off to the table to handle creation/update.
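        # Illustrative return shape (names hypothetical): a dict of update
        # actions plus the set of touched fields, e.g.
        #   ({'last_name': {'Action': 'PUT', 'Value': {'S': 'Doe'}},
        #     'date_joined': {'Action': 'DELETE'}}, {'last_name', 'date_joined'})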
final_data = {}
fields = set()
alterations = self._determine_alterations()
for key, value in alterations['adds'].items():
final_data[key] = {
'Action': 'PUT',
'Value': self._dynamizer.encode(self._data[key])
}
fields.add(key)
for key, value in alterations['changes'].items():
final_data[key] = {
'Action': 'PUT',
'Value': self._dynamizer.encode(self._data[key])
}
fields.add(key)
for key in alterations['deletes']:
final_data[key] = {
'Action': 'DELETE',
}
fields.add(key)
return final_data, fields
def partial_save(self):
"""
Saves only the changed data to DynamoDB.
Extremely useful for high-volume/high-write data sets, this allows
you to update only a handful of fields rather than having to push
entire items. This prevents many accidental overwrite situations as
well as saves on the amount of data to transfer over the wire.
Returns ``True`` on success, ``False`` if no save was performed or
the write failed.
Example::
>>> user['last_name'] = 'Doh!'
# Only the last name field will be sent to DynamoDB.
>>> user.partial_save()
"""
key = self.get_keys()
# Build a new dict of only the data we're changing.
final_data, fields = self.prepare_partial()
if not final_data:
return False
# Remove the key(s) from the ``final_data`` if present.
# They should only be present if this is a new item, in which
# case we shouldn't be sending as part of the data to update.
for fieldname, value in key.items():
if fieldname in final_data:
del final_data[fieldname]
try:
# It's likely also in ``fields``, so remove it there too.
fields.remove(fieldname)
except KeyError:
pass
# Build expectations of only the fields we're planning to update.
expects = self.build_expects(fields=fields)
returned = self.table._update_item(key, final_data, expects=expects)
# Mark the object as clean.
self.mark_clean()
return returned
def save(self, overwrite=False):
"""
Saves all data to DynamoDB.
By default, this attempts to ensure that none of the underlying
data has changed. If any fields have changed in between when the
``Item`` was constructed & when it is saved, this call will fail so
as not to cause any data loss.
If you're sure possibly overwriting data is acceptable, you can pass
an ``overwrite=True``. If that's not acceptable, you may be able to use
``Item.partial_save`` to only write the changed field data.
Optionally accepts an ``overwrite`` parameter, which should be a
boolean. If you provide ``True``, the item will be forcibly overwritten
within DynamoDB, even if another process changed the data in the
meantime. (Default: ``False``)
Returns ``True`` on success, ``False`` if no save was performed.
Example::
>>> user['last_name'] = 'Doh!'
# All data on the Item is sent to DynamoDB.
>>> user.save()
# If it fails, you can overwrite.
>>> user.save(overwrite=True)
"""
if not self.needs_save() and not overwrite:
return False
final_data = self.prepare_full()
expects = None
if overwrite is False:
# Build expectations about *all* of the data.
expects = self.build_expects()
returned = self.table._put_item(final_data, expects=expects)
# Mark the object as clean.
self.mark_clean()
return returned
def delete(self):
"""
        Deletes the item's data from DynamoDB.
Returns ``True`` on success.
Example::
# Buh-bye now.
>>> user.delete()
"""
key_data = self.get_keys()
return self.table.delete_item(**key_data)
| mit |
IllusionRom-deprecated/android_platform_tools_idea | python/lib/Lib/site-packages/django/contrib/localflavor/nl/forms.py | 311 | 2796 | """
NL-specific Form helpers
"""
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, Select
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
pc_re = re.compile('^\d{4}[A-Z]{2}$')
sofi_re = re.compile('^\d{9}$')
numeric_re = re.compile('^\d+$')
class NLZipCodeField(Field):
"""
A Dutch postal code field.
"""
default_error_messages = {
'invalid': _('Enter a valid postal code'),
}
def clean(self, value):
super(NLZipCodeField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = value.strip().upper().replace(' ', '')
if not pc_re.search(value):
raise ValidationError(self.error_messages['invalid'])
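        # Valid Dutch postal codes start at 1000 AA, so reject anything lower.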
if int(value[:4]) < 1000:
raise ValidationError(self.error_messages['invalid'])
return u'%s %s' % (value[:4], value[4:])
class NLProvinceSelect(Select):
"""
A Select widget that uses a list of provinces of the Netherlands as its
choices.
"""
def __init__(self, attrs=None):
from nl_provinces import PROVINCE_CHOICES
super(NLProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class NLPhoneNumberField(Field):
"""
A Dutch telephone number field.
"""
default_error_messages = {
'invalid': _('Enter a valid phone number'),
}
def clean(self, value):
super(NLPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
phone_nr = re.sub('[\-\s\(\)]', '', smart_unicode(value))
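        # Accept 10-digit national numbers, or '+31'-prefixed international
        # numbers that are 12 characters long after stripping separators.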
if len(phone_nr) == 10 and numeric_re.search(phone_nr):
return value
if phone_nr[:3] == '+31' and len(phone_nr) == 12 and \
numeric_re.search(phone_nr[3:]):
return value
raise ValidationError(self.error_messages['invalid'])
class NLSoFiNumberField(Field):
"""
A Dutch social security number (SoFi/BSN) field.
http://nl.wikipedia.org/wiki/Sofinummer
"""
default_error_messages = {
'invalid': _('Enter a valid SoFi number'),
}
def clean(self, value):
super(NLSoFiNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not sofi_re.search(value):
raise ValidationError(self.error_messages['invalid'])
if int(value) == 0:
raise ValidationError(self.error_messages['invalid'])
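        # Apply the Dutch "elfproef" (11-test): the digit sum weighted 9..2,
        # minus the last digit, must be divisible by 11. E.g. for the
        # illustrative number 111222333:
        #   9+8+7 + 12+10+8 + 9+6 = 69, 69 - 3 = 66, and 66 % 11 == 0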
checksum = 0
for i in range(9, 1, -1):
checksum += int(value[9-i]) * i
checksum -= int(value[-1])
if checksum % 11 != 0:
raise ValidationError(self.error_messages['invalid'])
return value
| apache-2.0 |
rezasafi/spark | examples/src/main/python/ml/dct_example.py | 123 | 1509 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import DCT
from pyspark.ml.linalg import Vectors
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("DCTExample")\
.getOrCreate()
# $example on$
df = spark.createDataFrame([
(Vectors.dense([0.0, 1.0, -2.0, 3.0]),),
(Vectors.dense([-1.0, 2.0, 4.0, -7.0]),),
(Vectors.dense([14.0, -2.0, -5.0, 1.0]),)], ["features"])
dct = DCT(inverse=False, inputCol="features", outputCol="featuresDCT")
dctDf = dct.transform(df)
dctDf.select("featuresDCT").show(truncate=False)
# $example off$
spark.stop()
| apache-2.0 |
tdyas/pants | src/python/pants/backend/jvm/register.py | 1 | 12485 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Support for both Java and Scala."""
from pants.backend.jvm.artifact import Artifact
from pants.backend.jvm.ossrh_publication_metadata import (
Developer,
License,
OSSRHPublicationMetadata,
Scm,
)
from pants.backend.jvm.repository import Repository as repo
from pants.backend.jvm.scala_artifact import ScalaArtifact
from pants.backend.jvm.subsystems.jar_dependency_management import JarDependencyManagementSetup
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.subsystems.scoverage_platform import ScoveragePlatform
from pants.backend.jvm.subsystems.shader import Shading
from pants.backend.jvm.target_types import (
AnnotationProcessor,
JarLibrary,
JavaAgent,
JavacPlugin,
JavaLibrary,
JunitTests,
JvmApp,
JvmBenchmark,
JvmBinary,
JvmCredentials,
JvmPrepCommand,
ManagedJarDependencies,
NetrcCredentials,
ScalacPlugin,
ScalaLibrary,
UnpackedJars,
)
from pants.backend.jvm.targets.annotation_processor import (
AnnotationProcessor as AnnotationProcessorV1,
)
from pants.backend.jvm.targets.benchmark import Benchmark as BenchmarkV1
from pants.backend.jvm.targets.credentials import LiteralCredentials as LiteralCredentialsV1
from pants.backend.jvm.targets.credentials import NetrcCredentials as NetrcCredentialsV1
from pants.backend.jvm.targets.jar_library import JarLibrary as JarLibraryV1
from pants.backend.jvm.targets.java_agent import JavaAgent as JavaAgentV1
from pants.backend.jvm.targets.java_library import JavaLibrary as JavaLibraryV1
from pants.backend.jvm.targets.javac_plugin import JavacPlugin as JavacPluginV1
from pants.backend.jvm.targets.junit_tests import JUnitTests as JUnitTestsV1
from pants.backend.jvm.targets.jvm_app import JvmApp as JvmAppV1
from pants.backend.jvm.targets.jvm_binary import Duplicate, JarRules
from pants.backend.jvm.targets.jvm_binary import JvmBinary as JvmBinaryV1
from pants.backend.jvm.targets.jvm_binary import Skip
from pants.backend.jvm.targets.jvm_prep_command import JvmPrepCommand as JvmPrepCommandV1
from pants.backend.jvm.targets.managed_jar_dependencies import (
ManagedJarDependencies as ManagedJarDependenciesV1,
)
from pants.backend.jvm.targets.managed_jar_dependencies import ManagedJarLibraries
from pants.backend.jvm.targets.scala_exclude import ScalaExclude
from pants.backend.jvm.targets.scala_jar_dependency import ScalaJarDependency
from pants.backend.jvm.targets.scala_library import ScalaLibrary as ScalaLibraryV1
from pants.backend.jvm.targets.scalac_plugin import ScalacPlugin as ScalacPluginV1
from pants.backend.jvm.targets.unpacked_jars import UnpackedJars as UnpackedJarsV1
from pants.backend.jvm.tasks.analysis_extraction import AnalysisExtraction
from pants.backend.jvm.tasks.benchmark_run import BenchmarkRun
from pants.backend.jvm.tasks.binary_create import BinaryCreate
from pants.backend.jvm.tasks.bootstrap_jvm_tools import BootstrapJvmTools
from pants.backend.jvm.tasks.bundle_create import BundleCreate
from pants.backend.jvm.tasks.check_published_deps import CheckPublishedDeps
from pants.backend.jvm.tasks.checkstyle import Checkstyle
from pants.backend.jvm.tasks.classmap import ClassmapTask
from pants.backend.jvm.tasks.consolidate_classpath import ConsolidateClasspath
from pants.backend.jvm.tasks.coursier_resolve import CoursierResolve
from pants.backend.jvm.tasks.detect_duplicates import DuplicateDetector
from pants.backend.jvm.tasks.ivy_imports import IvyImports
from pants.backend.jvm.tasks.ivy_outdated import IvyOutdated
from pants.backend.jvm.tasks.jar_create import JarCreate
from pants.backend.jvm.tasks.jar_publish import JarPublish
from pants.backend.jvm.tasks.javadoc_gen import JavadocGen
from pants.backend.jvm.tasks.junit_run import JUnitRun
from pants.backend.jvm.tasks.jvm_compile.javac.javac_compile import JavacCompile
from pants.backend.jvm.tasks.jvm_compile.jvm_classpath_publisher import RuntimeClasspathPublisher
from pants.backend.jvm.tasks.jvm_compile.rsc.rsc_compile import RscCompile
from pants.backend.jvm.tasks.jvm_dependency_check import JvmDependencyCheck
from pants.backend.jvm.tasks.jvm_dependency_usage import JvmDependencyUsage
from pants.backend.jvm.tasks.jvm_platform_analysis import JvmPlatformExplain, JvmPlatformValidate
from pants.backend.jvm.tasks.jvm_run import JvmRun
from pants.backend.jvm.tasks.nailgun_task import NailgunKillall
from pants.backend.jvm.tasks.prepare_resources import PrepareResources
from pants.backend.jvm.tasks.prepare_services import PrepareServices
from pants.backend.jvm.tasks.provide_tools_jar import ProvideToolsJar
from pants.backend.jvm.tasks.run_jvm_prep_command import (
RunBinaryJvmPrepCommand,
RunCompileJvmPrepCommand,
RunTestJvmPrepCommand,
)
from pants.backend.jvm.tasks.scala_repl import ScalaRepl
from pants.backend.jvm.tasks.scaladoc_gen import ScaladocGen
from pants.backend.jvm.tasks.scalafix_task import ScalaFixCheck, ScalaFixFix
from pants.backend.jvm.tasks.scalafmt_task import ScalaFmtCheckFormat, ScalaFmtFormat
from pants.backend.jvm.tasks.scalastyle_task import ScalastyleTask
from pants.backend.jvm.tasks.unpack_jars import UnpackJars
from pants.backend.project_info.tasks.export_dep_as_jar import ExportDepAsJar
from pants.build_graph.app_base import Bundle, DirectoryReMapper
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.goal import Goal
from pants.goal.task_registrar import TaskRegistrar as task
from pants.java.jar.exclude import Exclude
from pants.java.jar.jar_dependency import JarDependencyParseContextWrapper
def build_file_aliases():
return BuildFileAliases(
targets={
"annotation_processor": AnnotationProcessorV1,
"benchmark": BenchmarkV1,
"credentials": LiteralCredentialsV1,
"jar_library": JarLibraryV1,
"java_agent": JavaAgentV1,
"java_library": JavaLibraryV1,
"javac_plugin": JavacPluginV1,
"junit_tests": JUnitTestsV1,
"jvm_app": JvmAppV1,
"jvm_binary": JvmBinaryV1,
"jvm_prep_command": JvmPrepCommandV1,
"managed_jar_dependencies": ManagedJarDependenciesV1,
"netrc_credentials": NetrcCredentialsV1,
"scala_library": ScalaLibraryV1,
"scalac_plugin": ScalacPluginV1,
"unpacked_jars": UnpackedJarsV1,
},
objects={
"artifact": Artifact,
"scala_artifact": ScalaArtifact,
"ossrh": OSSRHPublicationMetadata,
"license": License,
"scm": Scm,
"developer": Developer,
"github": Scm.github,
"DirectoryReMapper": DirectoryReMapper,
"Duplicate": Duplicate,
"exclude": Exclude,
"scala_jar": ScalaJarDependency,
"scala_exclude": ScalaExclude,
"jar_rules": JarRules,
"repository": repo,
"Skip": Skip,
"shading_relocate": Shading.create_relocate,
"shading_exclude": Shading.create_exclude,
"shading_keep": Shading.create_keep,
"shading_zap": Shading.create_zap,
"shading_relocate_package": Shading.create_relocate_package,
"shading_exclude_package": Shading.create_exclude_package,
"shading_keep_package": Shading.create_keep_package,
"shading_zap_package": Shading.create_zap_package,
},
context_aware_object_factories={
"bundle": Bundle,
"jar": JarDependencyParseContextWrapper,
"managed_jar_libraries": ManagedJarLibraries,
},
)
def global_subsystems():
return (
ScalaPlatform,
ScoveragePlatform,
)
# TODO https://github.com/pantsbuild/pants/issues/604 register_goals
def register_goals():
ng_killall = task(name="ng-killall", action=NailgunKillall)
ng_killall.install()
Goal.by_name("invalidate").install(ng_killall, first=True)
Goal.by_name("clean-all").install(ng_killall, first=True)
task(name="jar-dependency-management", action=JarDependencyManagementSetup).install("bootstrap")
task(name="jvm-platform-explain", action=JvmPlatformExplain).install("jvm-platform-explain")
task(name="jvm-platform-validate", action=JvmPlatformValidate).install("jvm-platform-validate")
task(name="bootstrap-jvm-tools", action=BootstrapJvmTools).install("bootstrap")
task(name="provide-tools-jar", action=ProvideToolsJar).install("bootstrap")
# Compile
task(name="rsc", action=RscCompile).install("compile")
task(name="javac", action=JavacCompile).install("compile")
# Analysis extraction.
task(name="zinc", action=AnalysisExtraction).install("analysis")
# Dependency resolution.
task(name="coursier", action=CoursierResolve).install("resolve")
task(name="ivy-imports", action=IvyImports).install("imports")
task(name="unpack-jars", action=UnpackJars).install()
task(name="ivy", action=IvyOutdated).install("outdated")
# Resource preparation.
task(name="prepare", action=PrepareResources).install("resources")
task(name="services", action=PrepareServices).install("resources")
task(name="export-classpath", action=RuntimeClasspathPublisher).install()
# This goal affects the contents of the runtime_classpath, and should not be
# combined with any other goals on the command line.
task(name="export-dep-as-jar", action=ExportDepAsJar).install()
task(name="jvm", action=JvmDependencyUsage).install("dep-usage")
task(name="classmap", action=ClassmapTask).install("classmap")
# Generate documentation.
task(name="javadoc", action=JavadocGen).install("doc")
task(name="scaladoc", action=ScaladocGen).install("doc")
# Bundling.
task(name="create", action=JarCreate).install("jar")
detect_duplicates = task(name="dup", action=DuplicateDetector)
task(name="jvm", action=BinaryCreate).install("binary")
detect_duplicates.install("binary")
task(name="consolidate-classpath", action=ConsolidateClasspath).install("bundle")
task(name="jvm", action=BundleCreate).install("bundle")
detect_duplicates.install("bundle")
task(name="detect-duplicates", action=DuplicateDetector).install()
# Publishing.
task(name="check-published-deps", action=CheckPublishedDeps).install("check-published-deps")
task(name="jar", action=JarPublish).install("publish")
# Testing.
task(name="junit", action=JUnitRun).install("test")
task(name="bench", action=BenchmarkRun).install("bench")
# Linting.
task(name="scalafix", action=ScalaFixCheck).install("lint")
task(name="scalafmt", action=ScalaFmtCheckFormat, serialize=False).install("lint")
task(name="scalastyle", action=ScalastyleTask, serialize=False).install("lint")
task(name="checkstyle", action=Checkstyle, serialize=False).install("lint")
task(name="jvm-dep-check", action=JvmDependencyCheck, serialize=False).install("lint")
# Formatting.
# Scalafix has to go before scalafmt in order not to
# further change Scala files after scalafmt.
task(name="scalafix", action=ScalaFixFix).install("fmt")
task(name="scalafmt", action=ScalaFmtFormat, serialize=False).install("fmt")
# Running.
task(name="jvm", action=JvmRun, serialize=False).install("run")
task(name="jvm-dirty", action=JvmRun, serialize=False).install("run-dirty")
task(name="scala", action=ScalaRepl, serialize=False).install("repl")
task(name="scala-dirty", action=ScalaRepl, serialize=False).install("repl-dirty")
task(name="test-jvm-prep-command", action=RunTestJvmPrepCommand).install("test", first=True)
task(name="binary-jvm-prep-command", action=RunBinaryJvmPrepCommand).install(
"binary", first=True
)
task(name="compile-jvm-prep-command", action=RunCompileJvmPrepCommand).install(
"compile", first=True
)
def target_types():
return [
AnnotationProcessor,
JvmBenchmark,
JvmCredentials,
JarLibrary,
JavaAgent,
JavaLibrary,
JavacPlugin,
JunitTests,
JvmApp,
JvmBinary,
JvmPrepCommand,
ManagedJarDependencies,
NetrcCredentials,
ScalaLibrary,
ScalacPlugin,
UnpackedJars,
]
| apache-2.0 |
morenopc/edx-platform | common/test/acceptance/pages/studio/unit.py | 13 | 3855 | """
Unit page in Studio
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, Promise
from . import BASE_URL
from .container import ContainerPage
class UnitPage(PageObject):
"""
Unit page in Studio
"""
def __init__(self, browser, unit_locator):
super(UnitPage, self).__init__(browser)
self.unit_locator = unit_locator
@property
def url(self):
"""URL to the pages UI in a course."""
return "{}/unit/{}".format(BASE_URL, self.unit_locator)
def is_browser_on_page(self):
def _is_finished_loading():
# Wait until all components have been loaded
number_of_leaf_xblocks = len(self.q(css='{} .xblock-student_view'.format(Component.BODY_SELECTOR)).results)
number_of_container_xblocks = len(self.q(css='{} .wrapper-xblock'.format(Component.BODY_SELECTOR)).results)
is_done = len(self.q(css=Component.BODY_SELECTOR).results) == number_of_leaf_xblocks + number_of_container_xblocks
return (is_done, is_done)
# First make sure that an element with the view-unit class is present on the page,
# and then wait to make sure that the xblocks are all there
return (
self.q(css='body.view-unit').present and
Promise(_is_finished_loading, 'Finished rendering the xblocks in the unit.').fulfill()
)
@property
def components(self):
"""
Return a list of components loaded on the unit page.
"""
return self.q(css=Component.BODY_SELECTOR).map(
lambda el: Component(self.browser, el.get_attribute('data-locator'))).results
def edit_draft(self):
"""
        Start editing a draft of this unit.
"""
EmptyPromise(
lambda: self.q(css='.create-draft').present,
'Wait for edit draft link to be present'
).fulfill()
self.q(css='.create-draft').first.click()
EmptyPromise(
lambda: self.q(css='.editing-draft-alert').present,
'Wait for draft mode to be activated'
).fulfill()
class Component(PageObject):
"""
A PageObject representing an XBlock child on the Studio UnitPage (including
the editing controls).
"""
url = None
BODY_SELECTOR = '.component'
NAME_SELECTOR = '.component-header'
def __init__(self, browser, locator):
super(Component, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def _bounded_selector(self, selector):
"""
        Return `selector`, but limited to this particular `Component` context
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
@property
def name(self):
titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
if titles:
return titles[0]
else:
return None
@property
def preview_selector(self):
return self._bounded_selector('.xblock-student_view')
def edit(self):
self.q(css=self._bounded_selector('.edit-button')).first.click()
EmptyPromise(
lambda: self.q(css='.xblock-studio_view').present,
'Wait for the Studio editor to be present'
).fulfill()
return self
@property
def editor_selector(self):
return '.xblock-studio_view'
def go_to_container(self):
"""
Open the container page linked to by this component, and return
an initialized :class:`.ContainerPage` for that xblock.
"""
return ContainerPage(self.browser, self.locator).visit()
| agpl-3.0 |
timokoola/timoechobot | docutils/parsers/rst/languages/zh_cn.py | 128 | 4007 | # -*- coding: utf-8 -*-
# $Id: zh_cn.py 7119 2011-09-02 13:00:23Z milde $
# Author: Panjunyong <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Simplified Chinese language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'注意': 'attention',
u'小心': 'caution',
u'code (translation required)': 'code',
u'危险': 'danger',
u'错误': 'error',
u'提示': 'hint',
u'重要': 'important',
u'注解': 'note',
u'技巧': 'tip',
u'警告': 'warning',
u'忠告': 'admonition',
u'侧框': 'sidebar',
u'主题': 'topic',
u'line-block (translation required)': 'line-block',
u'parsed-literal (translation required)': 'parsed-literal',
u'醒目': 'rubric',
u'铭文': 'epigraph',
u'要点': 'highlights',
u'pull-quote (translation required)': 'pull-quote',
u'复合': 'compound',
u'容器': 'container',
#u'questions (translation required)': 'questions',
u'表格': 'table',
u'csv表格': 'csv-table',
u'列表表格': 'list-table',
#u'qa (translation required)': 'questions',
#u'faq (translation required)': 'questions',
u'元数据': 'meta',
u'math (translation required)': 'math',
#u'imagemap (translation required)': 'imagemap',
u'图片': 'image',
u'图例': 'figure',
u'包含': 'include',
u'原文': 'raw',
u'代替': 'replace',
u'统一码': 'unicode',
u'日期': 'date',
u'类型': 'class',
u'角色': 'role',
u'默认角色': 'default-role',
u'标题': 'title',
u'目录': 'contents',
u'章节序号': 'sectnum',
u'题头': 'header',
u'页脚': 'footer',
#u'footnotes (translation required)': 'footnotes',
#u'citations (translation required)': 'citations',
u'target-notes (translation required)': 'target-notes',
u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Simplified Chinese name to registered (in directives/__init__.py)
directive name mapping."""
roles = {
# language-dependent: fixed
u'缩写': 'abbreviation',
u'简称': 'acronym',
u'code (translation required)': 'code',
u'index (translation required)': 'index',
u'i (translation required)': 'index',
u'下标': 'subscript',
u'上标': 'superscript',
u'title-reference (translation required)': 'title-reference',
u'title (translation required)': 'title-reference',
u't (translation required)': 'title-reference',
u'pep-reference (translation required)': 'pep-reference',
u'pep (translation required)': 'pep-reference',
u'rfc-reference (translation required)': 'rfc-reference',
u'rfc (translation required)': 'rfc-reference',
u'强调': 'emphasis',
u'加粗': 'strong',
u'字面': 'literal',
u'math (translation required)': 'math',
u'named-reference (translation required)': 'named-reference',
u'anonymous-reference (translation required)': 'anonymous-reference',
u'footnote-reference (translation required)': 'footnote-reference',
u'citation-reference (translation required)': 'citation-reference',
u'substitution-reference (translation required)': 'substitution-reference',
u'target (translation required)': 'target',
u'uri-reference (translation required)': 'uri-reference',
u'uri (translation required)': 'uri-reference',
u'url (translation required)': 'uri-reference',
u'raw (translation required)': 'raw',}
"""Mapping of Simplified Chinese role names to canonical role names
for interpreted text."""
| apache-2.0 |
simbtrix/screenmix | screenmix/reinforcement/shapeSelection.py | 1 | 5607 | '''
Created on 13.05.2016
@author: mkennert
'''
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.gridlayout import GridLayout
from kivy.uix.scrollview import ScrollView
from ownComponents.design import Design
from ownComponents.ownButton import OwnButton
from ownComponents.ownGraph import OwnGraph
from plot.line import LinePlot
class ShapeSelection(GridLayout):
'''
    the shape-selection component makes it possible to change
    the cross-section shape
'''
#reinforcement-editor
information = ObjectProperty()
okStr = StringProperty('ok')
cancelStr = StringProperty('cancel')
rectStr = StringProperty('rectangle')
# constructor
def __init__(self, **kwargs):
super(ShapeSelection, self).__init__(**kwargs)
self.padding = Design.padding
self.cols, self.spacing = 2, Design.spacing
self.create_gui()
'''
create the gui
'''
def create_gui(self):
self.create_graphs()
self.create_selection()
'''
create all graphs
'''
def create_graphs(self):
self.create_graph_rectangle()
# default-shape Rectangle
self.add_widget(self.graphRectangle)
self.focusGraph = self.graphRectangle
###################################################################
# here you can add more shapes. #
# implement a graph which represent the shape #
###################################################################
'''
create the plot graph
'''
def create_graph_rectangle(self):
self.graphRectangle = OwnGraph(
x_ticks_major=0.1, y_ticks_major=0.05,
y_grid_label=True, x_grid_label=True,
xmin=0, xmax=0.5, ymin=0, ymax=0.25)
self.p = LinePlot(color=[1, 1, 1, 1], points=self.draw_rectangle())
self.graphRectangle.add_plot(self.p)
'''
draw the plot
'''
def draw_rectangle(self):
c, h, w = 1e-2, 0.23, 0.45
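        # corner points: bottom-left -> top-left -> top-right -> bottom-right,
        # closing the outline by repeating the first point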
return [(c, c), (c, h), (w, h), (w, c), (c, c)]
'''
create the right area where you can select
the shape
'''
def create_selection(self):
self.create_btns()
self.contentRight = GridLayout(cols=1)
# self.contentRight.add_widget(self.focusShape)
self.btns = GridLayout(cols=1, spacing=Design.spacing, size_hint_y=None)
# self.contentRight.add_widget(self.btns)
# Make sure the height is such that there is something to scroll.
self.btns.bind(minimum_height=self.btns.setter('height'))
self.btns.add_widget(self.plot)
###################################################################
# here you can add more shapes. #
# implement the button in the create_btns method #
###################################################################
layout = GridLayout(cols=2, spacing=Design.spacing)
layout.add_widget(self.btnOK)
layout.add_widget(self.btnCancel)
self.btns.add_widget(layout)
self.shapes = ScrollView()
self.shapes.add_widget(self.btns)
self.contentRight.add_widget(self.shapes)
self.add_widget(self.contentRight)
'''
create and bind all btns from the gui
'''
def create_btns(self):
self.btnOK = OwnButton(text=self.okStr)
self.btnOK.bind(on_press=self.finished)
self.btnCancel = OwnButton(text=self.cancelStr)
self.btnCancel.bind(on_press=self.cancel)
# default-shape=rectangle
self.focusShape = OwnButton(text=self.rectStr)
self.focusShape.bind(on_press=self.show_shapes_btn)
# btns
self.plot = OwnButton(text=self.rectStr)
self.plot.bind(on_press=self.show_rectangle)
#######################################################################
# here you can add more shapes #
        # Attention: make sure that the buttons have the properties           #
# size_hint_y=None, height=self.btnSize and a bind-method #
# like the show_rectangle-method #
#######################################################################
'''
show Rectangle-Graph
'''
def show_rectangle(self, btn):
self.remove_widget(self.focusGraph)
self.add_widget(self.graphRectangle, 1)
self.focusGraph = self.graphRectangle
self.focusShape.text = btn.text
#######################################################
        # if you want to add new shapes, make sure the shape  #
# has a show-method like the show_rectangle #
#######################################################
'''
show the btns where you can select the shape
'''
def show_shapes_btn(self, btn):
self.contentRight.remove_widget(self.focusShape)
self.contentRight.add_widget(self.shapes)
'''
finished the totally selection and call the
finished_shape_selection of the information
'''
def finished(self, btn):
self.information.finished_shape_selection(self.focusShape)
'''
cancel the shape selection
'''
def cancel(self, btn):
self.information.cancel_shape_selection()
| gpl-3.0 |
lanfker/tdma_imac | .waf-1.6.7-0a94702c61504c487a251b8d0a04ca9a/waflib/Tools/glib2.py | 3 | 8308 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/svn/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Task,Utils,Options,Errors,Logs
from waflib.TaskGen import taskgen_method,before_method,after_method,feature
def add_marshal_file(self,filename,prefix):
if not hasattr(self,'marshal_list'):
self.marshal_list=[]
self.meths.append('process_marshal')
self.marshal_list.append((filename,prefix))
def process_marshal(self):
for f,prefix in getattr(self,'marshal_list',[]):
node=self.path.find_resource(f)
if not node:
raise Errors.WafError('file not found %r'%f)
h_node=node.change_ext('.h')
c_node=node.change_ext('.c')
task=self.create_task('glib_genmarshal',node,[h_node,c_node])
task.env.GLIB_GENMARSHAL_PREFIX=prefix
self.source=self.to_nodes(getattr(self,'source',[]))
self.source.append(c_node)
class glib_genmarshal(Task.Task):
def run(self):
bld=self.inputs[0].__class__.ctx
get=self.env.get_flat
cmd1="%s %s --prefix=%s --header > %s"%(get('GLIB_GENMARSHAL'),self.inputs[0].srcpath(),get('GLIB_GENMARSHAL_PREFIX'),self.outputs[0].abspath())
ret=bld.exec_command(cmd1)
if ret:return ret
c='''#include "%s"\n'''%self.outputs[0].name
self.outputs[1].write(c)
cmd2="%s %s --prefix=%s --body >> %s"%(get('GLIB_GENMARSHAL'),self.inputs[0].srcpath(),get('GLIB_GENMARSHAL_PREFIX'),self.outputs[1].abspath())
return bld.exec_command(cmd2)
vars=['GLIB_GENMARSHAL_PREFIX','GLIB_GENMARSHAL']
color='BLUE'
ext_out=['.h']
def add_enums_from_template(self,source='',target='',template='',comments=''):
if not hasattr(self,'enums_list'):
self.enums_list=[]
self.meths.append('process_enums')
self.enums_list.append({'source':source,'target':target,'template':template,'file-head':'','file-prod':'','file-tail':'','enum-prod':'','value-head':'','value-prod':'','value-tail':'','comments':comments})
def add_enums(self,source='',target='',file_head='',file_prod='',file_tail='',enum_prod='',value_head='',value_prod='',value_tail='',comments=''):
if not hasattr(self,'enums_list'):
self.enums_list=[]
self.meths.append('process_enums')
self.enums_list.append({'source':source,'template':'','target':target,'file-head':file_head,'file-prod':file_prod,'file-tail':file_tail,'enum-prod':enum_prod,'value-head':value_head,'value-prod':value_prod,'value-tail':value_tail,'comments':comments})
def process_enums(self):
for enum in getattr(self,'enums_list',[]):
task=self.create_task('glib_mkenums')
env=task.env
inputs=[]
source_list=self.to_list(enum['source'])
if not source_list:
raise Errors.WafError('missing source '+str(enum))
source_list=[self.path.find_resource(k)for k in source_list]
inputs+=source_list
env['GLIB_MKENUMS_SOURCE']=[k.abspath()for k in source_list]
if not enum['target']:
raise Errors.WafError('missing target '+str(enum))
tgt_node=self.path.find_or_declare(enum['target'])
if tgt_node.name.endswith('.c'):
self.source.append(tgt_node)
env['GLIB_MKENUMS_TARGET']=tgt_node.abspath()
options=[]
if enum['template']:
template_node=self.path.find_resource(enum['template'])
options.append('--template %s'%(template_node.abspath()))
inputs.append(template_node)
params={'file-head':'--fhead','file-prod':'--fprod','file-tail':'--ftail','enum-prod':'--eprod','value-head':'--vhead','value-prod':'--vprod','value-tail':'--vtail','comments':'--comments'}
for param,option in params.items():
if enum[param]:
options.append('%s %r'%(option,enum[param]))
env['GLIB_MKENUMS_OPTIONS']=' '.join(options)
task.set_inputs(inputs)
task.set_outputs(tgt_node)
class glib_mkenums(Task.Task):
run_str='${GLIB_MKENUMS} ${GLIB_MKENUMS_OPTIONS} ${GLIB_MKENUMS_SOURCE} > ${GLIB_MKENUMS_TARGET}'
color='PINK'
ext_out=['.h']
def add_settings_schemas(self,filename_list):
if not hasattr(self,'settings_schema_files'):
self.settings_schema_files=[]
if not isinstance(filename_list,list):
filename_list=[filename_list]
self.settings_schema_files.extend(filename_list)
def add_settings_enums(self,namespace,filename_list):
if hasattr(self,'settings_enum_namespace'):
raise Errors.WafError("Tried to add gsettings enums to '%s' more than once"%self.name)
self.settings_enum_namespace=namespace
	if not isinstance(filename_list,list):
filename_list=[filename_list]
self.settings_enum_files=filename_list
def r_change_ext(self,ext):
name=self.name
k=name.rfind('.')
if k>=0:
name=name[:k]+ext
else:
name=name+ext
return self.parent.find_or_declare([name])
def process_settings(self):
enums_tgt_node=[]
install_files=[]
settings_schema_files=getattr(self,'settings_schema_files',[])
if settings_schema_files and not self.env['GLIB_COMPILE_SCHEMAS']:
raise Errors.WafError("Unable to process GSettings schemas - glib-compile-schemas was not found during configure")
if hasattr(self,'settings_enum_files'):
enums_task=self.create_task('glib_mkenums')
source_list=self.settings_enum_files
source_list=[self.path.find_resource(k)for k in source_list]
enums_task.set_inputs(source_list)
enums_task.env['GLIB_MKENUMS_SOURCE']=[k.abspath()for k in source_list]
target=self.settings_enum_namespace+'.enums.xml'
tgt_node=self.path.find_or_declare(target)
enums_task.set_outputs(tgt_node)
enums_task.env['GLIB_MKENUMS_TARGET']=tgt_node.abspath()
enums_tgt_node=[tgt_node]
install_files.append(tgt_node)
options='--comments "<!-- @comment@ -->" --fhead "<schemalist>" --vhead " <@type@ id=\\"%s.@EnumName@\\">" --vprod " <value nick=\\"@valuenick@\\" value=\\"@valuenum@\\"/>" --vtail " </@type@>" --ftail "</schemalist>" '%(self.settings_enum_namespace)
enums_task.env['GLIB_MKENUMS_OPTIONS']=options
for schema in settings_schema_files:
schema_task=self.create_task('glib_validate_schema')
schema_node=self.path.find_resource(schema)
if not schema_node:
raise Errors.WafError("Cannot find the schema file '%s'"%schema)
install_files.append(schema_node)
source_list=enums_tgt_node+[schema_node]
schema_task.set_inputs(source_list)
schema_task.env['GLIB_COMPILE_SCHEMAS_OPTIONS']=[("--schema-file="+k.abspath())for k in source_list]
target_node=r_change_ext(schema_node,'.xml.valid')
schema_task.set_outputs(target_node)
schema_task.env['GLIB_VALIDATE_SCHEMA_OUTPUT']=target_node.abspath()
def compile_schemas_callback(bld):
if not bld.is_install:return
Logs.pprint('YELLOW','Updating GSettings schema cache')
command=Utils.subst_vars("${GLIB_COMPILE_SCHEMAS} ${GSETTINGSSCHEMADIR}",bld.env)
ret=self.bld.exec_command(command)
if self.bld.is_install:
if not self.env['GSETTINGSSCHEMADIR']:
raise Errors.WafError('GSETTINGSSCHEMADIR not defined (should have been set up automatically during configure)')
if install_files:
self.bld.install_files(self.env['GSETTINGSSCHEMADIR'],install_files)
if not hasattr(self.bld,'_compile_schemas_registered'):
self.bld.add_post_fun(compile_schemas_callback)
self.bld._compile_schemas_registered=True
class glib_validate_schema(Task.Task):
run_str='rm -f ${GLIB_VALIDATE_SCHEMA_OUTPUT} && ${GLIB_COMPILE_SCHEMAS} --dry-run ${GLIB_COMPILE_SCHEMAS_OPTIONS} && touch ${GLIB_VALIDATE_SCHEMA_OUTPUT}'
color='PINK'
def configure(conf):
conf.find_program('glib-genmarshal',var='GLIB_GENMARSHAL')
conf.find_perl_program('glib-mkenums',var='GLIB_MKENUMS')
conf.find_program('glib-compile-schemas',var='GLIB_COMPILE_SCHEMAS',mandatory=False)
def getstr(varname):
return getattr(Options.options,varname,getattr(conf.env,varname,''))
gsettingsschemadir=getstr('GSETTINGSSCHEMADIR')
if not gsettingsschemadir:
datadir=getstr('DATADIR')
if not datadir:
prefix=conf.env['PREFIX']
datadir=os.path.join(prefix,'share')
gsettingsschemadir=os.path.join(datadir,'glib-2.0','schemas')
conf.env['GSETTINGSSCHEMADIR']=gsettingsschemadir
def options(opt):
opt.add_option('--gsettingsschemadir',help='GSettings schema location [Default: ${datadir}/glib-2.0/schemas]',default='',dest='GSETTINGSSCHEMADIR')
taskgen_method(add_marshal_file)
before_method('process_source')(process_marshal)
taskgen_method(add_enums_from_template)
taskgen_method(add_enums)
before_method('process_source')(process_enums)
taskgen_method(add_settings_schemas)
taskgen_method(add_settings_enums)
feature('glib2')(process_settings) | gpl-2.0 |
suryakencana/niimanga | niimanga/ctasks/batoto.py | 1 | 34700 | """
# Copyright (c) 06 2015 | surya
# 26/06/15 [email protected]
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# mangaeden.py
"""
import shutil
from concurrent.futures import ThreadPoolExecutor
from niimanga.libs.exceptions import HtmlError
from niimanga.libs.utils import LocalDateTime
from niimanga.models.master import ISOLang
from os import path, makedirs
from niimanga.libs import utils
from niimanga.models.manga import Manga, Chapter
from .celery import load_ini
from niimanga.models.meta.base import initialize_sql, DBSession
import re
import requests
from requests.packages.urllib3.connection import ConnectionError
from requests_futures.sessions import FuturesSession
from sqlalchemy.exc import IntegrityError
import transaction
INI = load_ini()
initialize_sql(INI)
def _chapter_slug(str_, slug_manga):
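    # Pull the chapter number out of the title and build the chapter slug,
    # e.g. (illustrative): _chapter_slug("Ch.123: Spicy", "bt-shokugeki-no-soma")
    #   -> ('123', 'bt-shokugeki-no-soma-chapter-123')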
name = str_
# print(name[name.index("C"):])
no = re.search(r"\d+(\.\d+)?", name[name.index("C"):]).group(0)
# print(no)
return no, utils.slugist('{1}-chapter-{0}'.format(no.zfill(3), slug_manga))
def build_to_sys(site, source):
try:
lt = LocalDateTime.now()
"""
dict(
thumb=self.netlocs[3] + "/".join([image_thumb.split('/')[-2], image_thumb.split('/')[-1]]),
origin=origin_url,
name=title,
# time=self.parseDate.human_to_date_stamp(time),
time=time,
last_chapter=last_title,
last_url=last_url,
site=self.netlocs[1]
)
"""
# list latest
# scrap series info
# url = "/".join([site.netlocs[2], source.get('origin')])
url = source.get('origin')
# print(url)
respcontent = site.get_html(url)
series_info = site.series_info(respcontent)
# series == manga
qry = Manga.query
manga = qry.filter(Manga.slug == utils.slugist(
"-".join([site.netlocs[4], source.get('name', None)])
)).first()
if manga is None:
with transaction.manager:
manga = Manga(
site.netlocs[4],
series_info.get('name', []),
0,
", ".join(series_info.get('tags', [])),
", ".join(series_info.get('authors', [])),
", ".join(series_info.get('artists', [])),
', '.join(series_info.get('aka', [])),
",".join(series_info.get('description', None)),
1 if 'ongoing' in series_info.get('status', '').lower()
else 2 if 'completed' in series_info.get('status', '').lower() else 0
)
# manga.id = utils.guid()
manga.origin = source.get('origin', '')
manga.chapter_updated = lt.from_time_stamp(source.get('time', 'now'))
ext = series_info.get('thumb_url', '').lower().rsplit('.', 1)[-1]
manga.thumb = '.'.join(['cover', ext])
manga.category = 'ja'
DBSession.add(manga)
DBSession.flush()
manga = qry.filter(Manga.slug == utils.slugist(
"-".join([site.netlocs[4], source.get('name', None)])
)).first()
manga_id, manga_thumb, manga_slug = manga.id, manga.thumb, manga.slug
ini_path = path.join(
path.dirname(
path.dirname(__file__)
),
'/'.join(['rak', 'manga', manga_id])
)
r = requests.get(source.get('thumb'))
path_img = '/'.join([ini_path, manga_thumb])
print(path_img)
if not path.exists(ini_path):
makedirs(ini_path)
with open(path_img, "wb") as code:
code.write(r.content)
chapters_info = series_info.get('chapters', [])
for i, ch in enumerate(chapters_info[0:2]):
print(ch.get('name', ''))
# batoto slug
slug_bt = ch.get('name', '')
if ':' in slug_bt:
slug_bt = slug_bt.split(':')
slug_bt.pop(0)
slug_bt = '-'.join(slug_bt)
slug_chapter = ' '.join([manga_slug, slug_bt])
            # check whether the chapter has already been downloaded
chapter = Chapter.query.filter(Chapter.slug == utils.slugist(slug_chapter)).first()
if chapter is None:
v = utils.parse_number(ch.get('name', ''), "Vol")
v = 0 if v is None else v
c = utils.parse_number(ch.get('name', ''), "Ch")
c = 0 if c is None else c
with transaction.manager:
chapter = Chapter(
slug_bt,
c,
v
)
time = lt.human_to_date(ch.get('time', 'now'))
# chapter.id = utils.guid()
ch_manga = Manga.query.get(manga_id)
ch_manga.chapter_count += 1
chapter.lang = ISOLang.query.filter(ISOLang.iso == 'en').first()
chapter.updated = time
chapter.manga = ch_manga
# s = 1000v + c
# chapter.sortorder = (1000*float(v)) + float(c)
chapter.sortorder = float(c)
chapter.slug = slug_chapter
DBSession.add(chapter)
DBSession.flush()
chapter = Chapter.query.filter(Chapter.slug == utils.slugist(slug_chapter)).first()
# batoto
html = site.get_html(ch.get('url'))
                # # fetch the images and save them locally under the chapter.id folder
chapter_info = site.chapter_info(html)
try:
# series info
# chapter info and images
session = FuturesSession(executor=ThreadPoolExecutor(max_workers=10))
for page in chapter_info.get('pages', []):
ini_chapter = '/'.join([ini_path, chapter.id])
print(page)
r = session.get(page).result()
if r.status_code != 200:
raise HtmlError('cannot fetch')
path_img = '/'.join([ini_chapter, page.split('/')[-1]])
print(path_img)
if not path.exists(ini_chapter):
makedirs(ini_chapter)
with open(path_img, "wb") as code:
code.write(r.content)
except ConnectionError as Conn:
print(Conn)
chapter = Chapter.query.get(chapter.id)
DBSession.delete(chapter)
shutil.rmtree(ini_chapter)
except AttributeError as e:
print(e.message)
except KeyError as e:
print(e.message)
except ValueError as e:
print(e.message)
def build_from_latestDB():
try:
"""
dict(
thumb=self.netlocs[3] + "/".join([image_thumb.split('/')[-2], image_thumb.split('/')[-1]]),
origin=origin_url,
name=title,
# time=self.parseDate.human_to_date_stamp(time),
time=time,
last_chapter=last_title,
last_url=last_url,
site=self.netlocs[1]
)
"""
trans = transaction.begin()
# manga = Manga(
# u'bt',
# u'Fairy Tail',
# 0,
# u'comedy, shounen, adventure',
# u'Mushi shi',
# u'Hiro antsuki',
# u'False Love',
# u'Nisekoi'
# )
# manga.category = u'ja'
# manga.save_it()
manga = Manga.query.filter(Manga.slug == u'bt-fairy-tail').first()
# DBSession.delete(manga)
chapters = [
{
"name": "Ch.123: Without Fearing Spiciness",
"url": "http://bato.to/read/_/327794/shokugeki-no-soma_ch123_by_casanova"
},
{
"name": "Ch.122: \"M\u00e1\" and \"L\u00e0\"",
"url": "http://bato.to/read/_/327793/shokugeki-no-soma_ch122_by_casanova"
},
{
"name": "Ch.121: Spicy Flavor Worship",
"url": "http://bato.to/read/_/325643/shokugeki-no-soma_ch121_by_casanova"
},
{
"name": "Ch.120: What Is It!!?",
"url": "http://bato.to/read/_/324650/shokugeki-no-soma_ch120_by_casanova"
},
{
"name": "Ch.119: The Distance from the Elite Ten",
"url": "http://bato.to/read/_/323145/shokugeki-no-soma_ch119_by_casanova"
},
{
"name": "Ch.118: Tootsuki Elite Ten",
"url": "http://bato.to/read/_/321978/shokugeki-no-soma_ch118_by_casanova"
},
{
"name": "Ch.117.3 Read Online",
"url": "http://bato.to/read/_/321119/shokugeki-no-soma_ch117.3_by_casanova"
},
{
"name": "Ch.117: Imposingly",
"url": "http://bato.to/read/_/321118/shokugeki-no-soma_ch117_by_casanova"
},
{
"name": "Ch.116.5: A Magnificent Banquet",
"url": "http://bato.to/read/_/318818/shokugeki-no-soma_ch116.5_by_casanova"
},
{
"name": "Ch.116: The Fruit Called Growth",
"url": "http://bato.to/read/_/318387/shokugeki-no-soma_ch116_by_casanova"
},
{
"name": "Ch.115: Tear Through",
"url": "http://bato.to/read/_/316969/shokugeki-no-soma_ch115_by_casanova"
},
{
"name": "Ch.114: Yuihara (Revamped)",
"url": "http://bato.to/read/_/316564/shokugeki-no-soma_ch114_by_casanova"
},
{
"name": "Ch.113: Forgotten Vegetables",
"url": "http://bato.to/read/_/314647/shokugeki-no-soma_ch113_by_casanova"
},
{
"name": "Ch.112: The Guidepost for Growth",
"url": "http://bato.to/read/_/314279/shokugeki-no-soma_ch112_by_casanova"
},
{
"name": "Ch.111: Main Course",
"url": "http://bato.to/read/_/312126/shokugeki-no-soma_ch111_by_casanova"
},
{
"name": "Ch.110: The Magician, Once Again---!",
"url": "http://bato.to/read/_/311083/shokugeki-no-soma_ch110_by_casanova"
},
{
"name": "Ch.109: Those Who Shed Light",
"url": "http://bato.to/read/_/309853/shokugeki-no-soma_ch109_by_casanova"
},
{
"name": "Ch.108: Choosing a Path",
"url": "http://bato.to/read/_/308448/shokugeki-no-soma_ch108_by_casanova"
},
{
"name": "Ch.107: Ideals and Distance",
"url": "http://bato.to/read/_/306749/shokugeki-no-soma_ch107_by_casanova"
},
{
"name": "Ch.106: A Busy Restaurant with Many Problems",
"url": "http://bato.to/read/_/305011/shokugeki-no-soma_ch106_by_casanova"
},
{
"name": "Ch.105: Stagiaire",
"url": "http://bato.to/read/_/303297/shokugeki-no-soma_ch105_by_casanova"
},
{
"name": "Ch.104: New \"Jewel\"",
"url": "http://bato.to/read/_/302063/shokugeki-no-soma_ch104_by_casanova"
},
{
"name": "Ch.103: Specialty",
"url": "http://bato.to/read/_/300229/shokugeki-no-soma_ch103_by_casanova"
},
{
"name": "Ch.102: Souma's Strength",
"url": "http://bato.to/read/_/299255/shokugeki-no-soma_ch102_by_casanova"
},
{
"name": "Ch.101: A Fine Tempered Sword",
"url": "http://bato.to/read/_/295858/shokugeki-no-soma_ch101_by_casanova"
},
{
"name": "Ch.100: A Sharp Blade",
"url": "http://bato.to/read/_/294443/shokugeki-no-soma_ch100_by_casanova"
},
{
"name": "Ch.99: The Fangs That Cut Through The Battlefield",
"url": "http://bato.to/read/_/293409/shokugeki-no-soma_ch99_by_casanova"
},
{
"name": "Ch.98 (full color): The \"Things\" They've Accumulated",
"url": "http://bato.to/read/_/292819/shokugeki-no-soma_ch98--full-color-_by_casanova"
},
{
"name": "Ch.98: The \"Things\" They've Accumulated",
"url": "http://bato.to/read/_/290601/shokugeki-no-soma_ch98_by_casanova"
},
{
"name": "Ch.97 (full color): Moonlight Memories",
"url": "http://bato.to/read/_/292818/shokugeki-no-soma_ch97--full-color-_by_casanova"
},
{
"name": "Ch.97: Moonlight Memories",
"url": "http://bato.to/read/_/289696/shokugeki-no-soma_ch97_by_casanova"
},
{
"name": "Ch.96 (full color): The Answer He Reached",
"url": "http://bato.to/read/_/292817/shokugeki-no-soma_ch96--full-color-_by_casanova"
},
{
"name": "Ch.96: The Answer He Reached",
"url": "http://bato.to/read/_/287642/shokugeki-no-soma_ch96_by_casanova"
},
{
"name": "Ch.95 (full color): A Battle Surrounding the \"Season\"",
"url": "http://bato.to/read/_/292816/shokugeki-no-soma_ch95--full-color-_by_casanova"
},
{
"name": "Ch.95: A Battle Surrounding the \"Season\"",
"url": "http://bato.to/read/_/286562/shokugeki-no-soma_ch95_by_casanova"
},
{
"name": "Ch.94: Seizing the Season",
"url": "http://bato.to/read/_/284514/shokugeki-no-soma_ch94_by_casanova"
},
{
"name": "Ch.93: The \"Sword\" That Announces Autumn",
"url": "http://bato.to/read/_/282575/shokugeki-no-soma_ch93_by_casanova"
},
{
"name": "Ch.92: Firestarter",
"url": "http://bato.to/read/_/280599/shokugeki-no-soma_ch92_by_casanova"
},
{
"name": "Ch.91: Beats Eating Each Other",
"url": "http://bato.to/read/_/279908/shokugeki-no-soma_ch91_by_casanova"
},
{
"name": "Ch.90: Iron Will, Heart of Steel",
"url": "http://bato.to/read/_/278692/shokugeki-no-soma_ch90_by_casanova"
},
{
"name": "Ch.89: Morning Will Come Again",
"url": "http://bato.to/read/_/277091/shokugeki-no-soma_ch89_by_casanova"
},
{
"name": "Ch.88: ~DREAMLAND~",
"url": "http://bato.to/read/_/275550/shokugeki-no-soma_ch88_by_casanova"
},
{
"name": "Ch.87: Secret Plan",
"url": "http://bato.to/read/_/274593/shokugeki-no-soma_ch87_by_casanova"
},
{
"name": "Ch.86: Garniture",
"url": "http://bato.to/read/_/272508/shokugeki-no-soma_ch86_by_casanova"
},
{
"name": "Ch.85.2 Read Online",
"url": "http://bato.to/read/_/271777/shokugeki-no-soma_ch85.2_by_casanova"
},
{
"name": "Ch.85.1 Read Online",
"url": "http://bato.to/read/_/271776/shokugeki-no-soma_ch85.1_by_casanova"
},
{
"name": "Ch.85: The First Bite's Secret",
"url": "http://bato.to/read/_/271775/shokugeki-no-soma_ch85_by_casanova"
},
{
"name": "Ch.84: Hidden Assignment",
"url": "http://bato.to/read/_/270967/shokugeki-no-soma_ch84_by_casanova"
},
{
"name": "Ch.83: The Chaser And The Chased",
"url": "http://bato.to/read/_/268312/shokugeki-no-soma_ch83_by_casanova"
},
{
"name": "Ch.82: Starting Line",
"url": "http://bato.to/read/_/265163/shokugeki-no-soma_ch82_by_casanova"
},
{
"name": "Ch.81: The Observer Arrives",
"url": "http://bato.to/read/_/263615/shokugeki-no-soma_ch81_by_casanova"
},
{
"name": "Ch.80: The Conditions for the Challenge",
"url": "http://bato.to/read/_/262016/shokugeki-no-soma_ch80_by_casanova"
},
{
"name": "Ch.79: The Last \"Card\"",
"url": "http://bato.to/read/_/259695/shokugeki-no-soma_ch79_by_casanova"
},
{
"name": "Ch.78: A Paper-Thin Difference Between Offense and Defense",
"url": "http://bato.to/read/_/258287/shokugeki-no-soma_ch78_by_casanova"
},
{
"name": "Ch.77: Pursuer",
"url": "http://bato.to/read/_/256463/shokugeki-no-soma_ch77_by_casanova"
},
{
"name": "Ch.76: Duel Etiquette",
"url": "http://bato.to/read/_/254889/shokugeki-no-soma_ch76_by_casanova"
},
{
"name": "Ch.75: Beneath The Mask",
"url": "http://bato.to/read/_/252716/shokugeki-no-soma_ch75_by_casanova"
},
{
"name": "Ch.74: Sensitive Monster",
"url": "http://bato.to/read/_/250870/shokugeki-no-soma_ch74_by_casanova"
},
{
"name": "Ch.73: Minding The Details",
"url": "http://bato.to/read/_/248966/shokugeki-no-soma_ch73_by_casanova"
},
{
"name": "Ch.72: The \"Jewels\" Generation",
"url": "http://bato.to/read/_/247956/shokugeki-no-soma_ch72_by_casanova"
},
{
"name": "Ch.71: \"Courage\" and \"Resolution\"",
"url": "http://bato.to/read/_/246285/shokugeki-no-soma_ch71_by_casanova"
},
{
"name": "Ch.70: Polar Opposites",
"url": "http://bato.to/read/_/245239/shokugeki-no-soma_ch70_by_casanova"
},
{
"name": "Ch.69: Kitchen's Dictator",
"url": "http://bato.to/read/_/243801/shokugeki-no-soma_ch69_by_casanova"
},
{
"name": "Ch.68: The \"Port City\" Match",
"url": "http://bato.to/read/_/241781/shokugeki-no-soma_ch68_by_casanova"
},
{
"name": "Ch.67: Blending Light And Shadow",
"url": "http://bato.to/read/_/239555/shokugeki-no-soma_ch67_by_casanova"
},
{
"name": "Ch.66: What Fills That Box",
"url": "http://bato.to/read/_/237502/shokugeki-no-soma_ch66_by_casanova"
},
{
"name": "Ch.65: The Theory of Bento Evolution",
"url": "http://bato.to/read/_/236405/shokugeki-no-soma_ch65_by_casanova"
},
{
"name": "Ch.64: On the Edge",
"url": "http://bato.to/read/_/234698/shokugeki-no-soma_ch64_by_casanova"
},
{
"name": "Ch.63: Plan",
"url": "http://bato.to/read/_/232844/shokugeki-no-soma_ch63_by_casanova"
},
{
"name": "Ch.62: A Meeting of Strong People",
"url": "http://bato.to/read/_/230838/shokugeki-no-soma_ch62_by_casanova"
},
{
"name": "Ch.61: Putting Your Heart Into It",
"url": "http://bato.to/read/_/228801/shokugeki-no-soma_ch61_by_casanova"
},
{
"name": "Ch.60: The Warriors' Banquet",
"url": "http://bato.to/read/_/227472/shokugeki-no-soma_ch60_by_casanova"
},
{
"name": "Ch.59: Their Respective Weapons",
"url": "http://bato.to/read/_/225853/shokugeki-no-soma_ch59_by_casanova"
},
{
"name": "Ch.58: Holy Aroma",
"url": "http://bato.to/read/_/224397/shokugeki-no-soma_ch58_by_casanova"
},
{
"name": "Ch.57: Her Memories",
"url": "http://bato.to/read/_/222875/shokugeki-no-soma_ch57_by_casanova"
},
{
"name": "Ch.56: Tuscan Moon",
"url": "http://bato.to/read/_/222555/shokugeki-no-soma_ch56_by_casanova"
},
{
"name": "Ch.55: A Hole Drilled with Knowledge",
"url": "http://bato.to/read/_/221797/shokugeki-no-soma_ch55_by_casanova"
},
{
"name": "Ch.54: A Recital of Blossoming Individuals",
"url": "http://bato.to/read/_/219111/shokugeki-no-soma_ch54_by_casanova"
},
{
"name": "Ch.53: The Man Who Came From A Cold Country",
"url": "http://bato.to/read/_/215047/shokugeki-no-soma_ch53_by_casanova"
},
{
"name": "Ch.52.5: Natsuyumi no Erina",
"url": "http://bato.to/read/_/213824/shokugeki-no-soma_ch52.5_by_casanova"
},
{
"name": "Ch.52: Those Who Serve the Best",
"url": "http://bato.to/read/_/211649/shokugeki-no-soma_ch52_by_casanova"
},
{
"name": "Ch.51: The Witch's Dining Table",
"url": "http://bato.to/read/_/211213/shokugeki-no-soma_ch51_by_casanova"
},
{
"name": "Ch.50: Those Beyond Ordinary",
"url": "http://bato.to/read/_/210069/shokugeki-no-soma_ch50_by_casanova"
},
{
"name": "Ch.49: Wolf Pack",
"url": "http://bato.to/read/_/208381/shokugeki-no-soma_ch49_by_casanova"
},
{
"name": "Ch.48: The Known Unknown",
"url": "http://bato.to/read/_/207413/shokugeki-no-soma_ch48_by_casanova"
},
{
"name": "Ch.47: Battle Memories",
"url": "http://bato.to/read/_/205556/shokugeki-no-soma_ch47_by_casanova"
},
{
"name": "Ch.46: The Dragon Lies Down and then Ascends to the Sky",
"url": "http://bato.to/read/_/203799/shokugeki-no-soma_ch46_by_casanova"
},
{
"name": "Ch.45: The Accompanist of Aromas and Stimuli",
"url": "http://bato.to/read/_/202784/shokugeki-no-soma_ch45_by_casanova"
},
{
"name": "Ch.44: An Unexpected Straight",
"url": "http://bato.to/read/_/201764/shokugeki-no-soma_ch44_by_casanova"
},
{
"name": "Ch.43: The Cook Who Has Travelled Thousands of Miles",
"url": "http://bato.to/read/_/200010/shokugeki-no-soma_ch43_by_casanova"
},
{
"name": "Ch.42: Wake Up Kiss",
"url": "http://bato.to/read/_/199003/shokugeki-no-soma_ch42_by_casanova"
},
{
"name": "Ch.41: The Man Who was Called an \"Asura\"",
"url": "http://bato.to/read/_/196809/shokugeki-no-soma_ch41_by_casanova"
},
{
"name": "Ch.40: Return",
"url": "http://bato.to/read/_/195573/shokugeki-no-soma_ch40_by_casanova"
},
{
"name": "Ch.39: The Chosen Ones",
"url": "http://bato.to/read/_/192744/shokugeki-no-soma_ch39_by_casanova"
},
{
"name": "Ch.38: Sensual Karaage (4)",
"url": "http://bato.to/read/_/192097/shokugeki-no-soma_ch38_by_casanova"
},
{
"name": "Ch.37: Sensual Karaage (3)",
"url": "http://bato.to/read/_/190617/shokugeki-no-soma_ch37_by_casanova"
},
{
"name": "Ch.36v2: Sensual Kaarage (2)",
"url": "http://bato.to/read/_/189007/shokugeki-no-soma_ch36v2_by_casanova"
},
{
"name": "Ch.35.5: Mid-Summer's Nikumi-san",
"url": "http://bato.to/read/_/188961/shokugeki-no-soma_ch35.5_by_casanova"
},
{
"name": "Ch.35: Sensual Karaage (1)",
"url": "http://bato.to/read/_/186597/shokugeki-no-soma_ch35_by_casanova"
},
{
"name": "Ch.34: The Fate Surrounding Tootsuki",
"url": "http://bato.to/read/_/185446/shokugeki-no-soma_ch34_by_casanova"
},
{
"name": "Ch.33: To the People that will Eventually Fight",
"url": "http://bato.to/read/_/184581/shokugeki-no-soma_ch33_by_casanova"
},
{
"name": "Ch.32: Dancing Cook",
"url": "http://bato.to/read/_/183357/shokugeki-no-soma_ch32_by_casanova"
},
{
"name": "Ch.31: Metamorphose",
"url": "http://bato.to/read/_/182129/shokugeki-no-soma_ch31_by_casanova"
},
{
"name": "Ch.30: A Set Trap",
"url": "http://bato.to/read/_/180945/shokugeki-no-soma_ch30_by_casanova"
},
{
"name": "Ch.29: The Eggs Before Dawn",
"url": "http://bato.to/read/_/179806/shokugeki-no-soma_ch29_by_casanova"
},
{
"name": "Ch.28: Everyone Must Not Fall Asleep",
"url": "http://bato.to/read/_/178134/shokugeki-no-soma_ch28_by_casanova"
},
{
"name": "Ch.27: The Bitterness of Defeat",
"url": "http://bato.to/read/_/177135/shokugeki-no-soma_ch27_by_casanova"
},
{
"name": "Ch.26: Memories of a Dish",
"url": "http://bato.to/read/_/176297/shokugeki-no-soma_ch26_by_casanova"
},
{
"name": "Ch.25: Those Remnants",
"url": "http://bato.to/read/_/174116/shokugeki-no-soma_ch25_by_casanova"
},
{
"name": "Ch.24: The Magician that Came from the East",
"url": "http://bato.to/read/_/173475/shokugeki-no-soma_ch24_by_casanova"
},
{
"name": "Ch.23: Proof of Existence",
"url": "http://bato.to/read/_/171105/shokugeki-no-soma_ch23_by_casanova"
},
{
"name": "Ch.22: Alumni",
"url": "http://bato.to/read/_/170355/shokugeki-no-soma_ch22_by_casanova"
},
{
"name": "Ch.21: The Supreme Recette",
"url": "http://bato.to/read/_/167841/shokugeki-no-soma_ch21_by_casanova"
},
{
"name": "Ch.20: Verdict",
"url": "http://bato.to/read/_/166990/shokugeki-no-soma_ch20_by_casanova"
},
{
"name": "Ch.19: Sparkling Soul",
"url": "http://bato.to/read/_/165823/shokugeki-no-soma_ch19_by_casanova"
},
{
"name": "Ch.18: The Seed of Ideas",
"url": "http://bato.to/read/_/165444/shokugeki-no-soma_ch18_by_casanova"
},
{
"name": "Ch.17: The Coating that Colors the Mountain",
"url": "http://bato.to/read/_/164819/shokugeki-no-soma_ch17_by_casanova"
},
{
"name": "Vol.3 Ch.16.5 Read Online",
"url": "http://bato.to/read/_/213776/shokugeki-no-soma_v3_ch16.5_by_casanova"
},
{
"name": "Ch.16: Concerto of Ideas and Creation",
"url": "http://bato.to/read/_/162138/shokugeki-no-soma_ch16_by_casanova"
},
{
"name": "Ch.15: Friction and Elite",
"url": "http://bato.to/read/_/161276/shokugeki-no-soma_ch15_by_casanova"
},
{
"name": "Vol.2 Ch.14.5: Volume 2 Extra and Recipes",
"url": "http://bato.to/read/_/209555/shokugeki-no-soma_v2_ch14.5_by_casanova"
},
{
"name": "Ch.14: Megumi's Garden",
"url": "http://bato.to/read/_/160292/shokugeki-no-soma_ch14_by_casanova"
},
{
"name": "Ch.13: Quiet Don, An Eloquent Don",
"url": "http://bato.to/read/_/159427/shokugeki-no-soma_ch13_by_casanova"
},
{
"name": "Ch.12: Enter the Battlefield",
"url": "http://bato.to/read/_/158233/shokugeki-no-soma_ch12_by_casanova"
},
{
"name": "Ch.11: The Night Before the Showdown",
"url": "http://bato.to/read/_/157118/shokugeki-no-soma_ch11_by_casanova"
},
{
"name": "Ch.10: The Meat Invader",
"url": "http://bato.to/read/_/155824/shokugeki-no-soma_ch10_by_casanova"
},
{
"name": "Ch.9: The Ice Queen and the Spring Storm",
"url": "http://bato.to/read/_/154910/shokugeki-no-soma_ch9_by_casanova"
},
{
"name": "Ch.8: A Dish that Calls for Spring",
"url": "http://bato.to/read/_/153806/shokugeki-no-soma_ch8_by_casanova"
},
{
"name": "Ch.7: Lawless Area",
"url": "http://bato.to/read/_/153114/shokugeki-no-soma_ch7_by_casanova"
},
{
"name": "Ch.6: Maria of the Polar Star",
"url": "http://bato.to/read/_/149043/shokugeki-no-soma_ch6_by_casanova"
},
{
"name": "Ch.5: The Chef That Doesn't Smile",
"url": "http://bato.to/read/_/147981/shokugeki-no-soma_ch5_by_casanova"
},
{
"name": "Ch.4.5: Kurase-san's Diary + Recipe 1",
"url": "http://bato.to/read/_/199090/shokugeki-no-soma_ch4.5_by_casanova"
},
{
"name": "Ch.4: The Demon King Talks About \"Gems\"",
"url": "http://bato.to/read/_/146795/shokugeki-no-soma_ch4_by_casanova"
},
{
"name": "Ch.3: \"Transforming Furikake\"",
"url": "http://bato.to/read/_/146229/shokugeki-no-soma_ch3_by_casanova"
},
{
"name": "Ch.2: God's Tounge",
"url": "http://bato.to/read/_/144856/shokugeki-no-soma_ch2_by_casanova"
},
{
"name": "Ch.1: The Endless Wilderness",
"url": "http://bato.to/read/_/143718/shokugeki-no-soma_ch1_by_casanova"
},
{
"name": "Ch.0: [Oneshot]",
"url": "http://bato.to/read/_/182841/shokugeki-no-soma_by_utopia"
}
]
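        # Parse volume/chapter numbers out of each entry's name and derive a
        # stable sort key below (sortorder = 1000 * volume + chapter).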
for i, ch in enumerate(chapters):
# eden
# url = "/".join([site.netlocs[2], ch.get('url')])
# html = site.get_html(url)
# site.chapter_info(html)
v = utils.parse_number(ch.get('name', ''), "Vol")
v = 0 if v is None else v
c = utils.parse_number(ch.get('name', ''), "Ch")
c = 0 if c is None else c
try:
chapter = Chapter(
ch.get('name', '').split(':')[-1],
c,
v
)
chapter.id = utils.guid()
chapter.slug = " ".join([manga.slug, ch.get('name', '').split(':')[0]])
chapter.manga = manga
# s = 1000v + c
chapter.sortorder = (1000*float(v)) + float(c)
chapter.save_it()
print(chapter.id)
ini_path = path.join(
path.dirname(
path.dirname(__file__)
),
'/'.join(['rak', 'manga', chapter.id])
)
print(ini_path)
except IntegrityError as IE:
print(IE.message)
# if 'violates unique constraint' in IE.message:
# c += float(c / 100)
# chapter = Chapter(
# ch.get('name', '').split(':')[-1],
# manga.slug,
# c,
# v
# )
# chapter.manga = manga
# # s = 1000v + c
# print("{0}: {1}".format(v, c))
# chapter.sortorder = (1000*float(v)) + float(c)
# chapter.save_it()
trans.commit()
except AttributeError as e:
print(e.message)
except KeyError as e:
print(e.message)
except ValueError as e:
print(e.message)
| lgpl-3.0 |
idlead/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
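# Fraction of each split used for training (the complement of the held-out
# test fraction passed to train_test_split below).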
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
jianran/spark | examples/src/main/python/ml/vector_assembler_example.py | 123 | 1649 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("VectorAssemblerExample")\
.getOrCreate()
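    # VectorAssembler concatenates the listed input columns (scalars and/or
    # vectors) into a single vector column, in the order given by inputCols.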
# $example on$
dataset = spark.createDataFrame(
[(0, 18, 1.0, Vectors.dense([0.0, 10.0, 0.5]), 1.0)],
["id", "hour", "mobile", "userFeatures", "clicked"])
assembler = VectorAssembler(
inputCols=["hour", "mobile", "userFeatures"],
outputCol="features")
output = assembler.transform(dataset)
print("Assembled columns 'hour', 'mobile', 'userFeatures' to vector column 'features'")
output.select("features", "clicked").show(truncate=False)
# $example off$
spark.stop()
| apache-2.0 |
lindsayad/sympy | sympy/matrices/tests/test_densearith.py | 80 | 1844 | from sympy.matrices.densetools import eye
from sympy.matrices.densearith import add, sub, mulmatmat, mulmatscaler
from sympy import ZZ
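# These helpers work on dense matrices represented as lists of rows whose
# entries belong to a ground domain (ZZ, the integers, throughout these tests).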
def test_add():
a = [[ZZ(3), ZZ(7), ZZ(4)], [ZZ(2), ZZ(4), ZZ(5)], [ZZ(6), ZZ(2), ZZ(3)]]
b = [[ZZ(5), ZZ(4), ZZ(9)], [ZZ(3), ZZ(7), ZZ(1)], [ZZ(12), ZZ(13), ZZ(14)]]
c = [[ZZ(12)], [ZZ(17)], [ZZ(21)]]
d = [[ZZ(3)], [ZZ(4)], [ZZ(5)]]
e = [[ZZ(12), ZZ(78)], [ZZ(56), ZZ(79)]]
f = [[ZZ.zero, ZZ.zero], [ZZ.zero, ZZ.zero]]
assert add(a, b, ZZ) == [[ZZ(8), ZZ(11), ZZ(13)], [ZZ(5), ZZ(11), ZZ(6)], [ZZ(18), ZZ(15), ZZ(17)]]
assert add(c, d, ZZ) == [[ZZ(15)], [ZZ(21)], [ZZ(26)]]
assert add(e, f, ZZ) == e
def test_sub():
a = [[ZZ(3), ZZ(7), ZZ(4)], [ZZ(2), ZZ(4), ZZ(5)], [ZZ(6), ZZ(2), ZZ(3)]]
b = [[ZZ(5), ZZ(4), ZZ(9)], [ZZ(3), ZZ(7), ZZ(1)], [ZZ(12), ZZ(13), ZZ(14)]]
c = [[ZZ(12)], [ZZ(17)], [ZZ(21)]]
d = [[ZZ(3)], [ZZ(4)], [ZZ(5)]]
e = [[ZZ(12), ZZ(78)], [ZZ(56), ZZ(79)]]
f = [[ZZ.zero, ZZ.zero], [ZZ.zero, ZZ.zero]]
assert sub(a, b, ZZ) == [[ZZ(-2), ZZ(3), ZZ(-5)], [ZZ(-1), ZZ(-3), ZZ(4)], [ZZ(-6), ZZ(-11), ZZ(-11)]]
assert sub(c, d, ZZ) == [[ZZ(9)], [ZZ(13)], [ZZ(16)]]
assert sub(e, f, ZZ) == e
def test_mulmatmat():
a = [[ZZ(3), ZZ(4)], [ZZ(5), ZZ(6)]]
b = [[ZZ(1), ZZ(2)], [ZZ(7), ZZ(8)]]
c = eye(2, ZZ)
d = [[ZZ(6)], [ZZ(7)]]
assert mulmatmat(a, b, ZZ) == [[ZZ(31), ZZ(38)], [ZZ(47), ZZ(58)]]
assert mulmatmat(b, d, ZZ) == [[ZZ(20)], [ZZ(98)]]
def test_mulmatscaler():
a = eye(3, ZZ)
b = [[ZZ(3), ZZ(7), ZZ(4)], [ZZ(2), ZZ(4), ZZ(5)], [ZZ(6), ZZ(2), ZZ(3)]]
assert mulmatscaler(a, ZZ(4), ZZ) == [[ZZ(4), ZZ(0), ZZ(0)], [ZZ(0), ZZ(4), ZZ(0)], [ZZ(0), ZZ(0), ZZ(4)]]
assert mulmatscaler(b, ZZ(1), ZZ) == [[ZZ(3), ZZ(7), ZZ(4)], [ZZ(2), ZZ(4), ZZ(5)], [ZZ(6), ZZ(2), ZZ(3)]]
| bsd-3-clause |
hs634/algorithms | python/company/dropbox.py | 1 | 1392 | __author__ = 'hs634'
# Interview-style sketch of a multi-threaded crawler; fetch_page() and
# fetch_links() are assumed helpers that download a page and extract its links.
from threading import Thread, Lock, Condition
from queue import Queue  # Queue.Queue on Python 2
class Crawler(Thread):
    def __init__(self, q, seen, index, lock, wlock, url_available, worker_available):
        super(Crawler, self).__init__()
        self.queue = q
        self.seen = seen
        self.index = index
        self.status = "Free"
        self.qandslock = lock
        self.worker_lock = wlock
        self.url_available = url_available
        self.worker_available = worker_available
    def crawl(self, start_url):
        cur_page = fetch_page(start_url)
        cur_links = fetch_links(cur_page)
        # Push newly discovered links onto the shared frontier.
        with self.qandslock:
            for link in cur_links:
                if link not in self.seen:
                    self.seen[link] = True
                    self.queue.put(link)
            self.url_available.notify()
        # Mark this worker free again and wake up the controller.
        with self.worker_lock:
            self.status = "Free"
            self.worker_available.notify()
class Controller(object):
    def __init__(self, index, worker_pool_size):
        self.queue = Queue()
        self.seen = {}
        self.qandslock = Lock()
        self.worker_lock = Lock()
        self.url_available = Condition(self.qandslock)
        self.worker_available = Condition(self.worker_lock)
        self.index = index
        self.worker_pool = [
            Crawler(self.queue, self.seen, self.index, self.qandslock,
                    self.worker_lock, self.url_available, self.worker_available)
            for __ in range(worker_pool_size)]
    def get_free_worker(self):
        # Caller must hold worker_lock.
        for worker in self.worker_pool:
            if worker.status == "Free":
                return worker
        return None
    def run(self, start_url):
        # Seed the shared frontier, then block until a URL is available.
        with self.qandslock:
            if start_url not in self.seen:
                self.seen[start_url] = True
                self.queue.put(start_url)
            while self.queue.empty():
                self.url_available.wait()
            start_url = self.queue.get()
        # Claim a free worker; dispatch outside the lock because crawl()
        # re-acquires worker_lock when it finishes.
        with self.worker_lock:
            worker = self.get_free_worker()
            while worker is None:
                self.worker_available.wait()
                worker = self.get_free_worker()
            worker.status = "Busy"
        if worker is not None:
| mit |
anandpdoshi/erpnext | erpnext/config/projects.py | 2 | 1504 | from __future__ import unicode_literals
from frappe import _
def get_data():
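	# Each dict below defines one section of the Projects module page; its
	# "items" list wires doctypes, reports and help links into that section.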
return [
{
"label": _("Projects"),
"icon": "icon-star",
"items": [
{
"type": "doctype",
"name": "Project",
"description": _("Project master."),
},
{
"type": "doctype",
"name": "Task",
"description": _("Project activity / task."),
},
{
"type": "report",
"route": "Gantt/Task",
"doctype": "Task",
"name": "Gantt Chart",
"description": _("Gantt chart of all tasks.")
},
]
},
{
"label": _("Time Tracking"),
"items": [
{
"type": "doctype",
"name": "Timesheet",
"description": _("Timesheet for tasks."),
},
{
"type": "doctype",
"name": "Activity Type",
"description": _("Types of activities for Time Logs"),
},
{
"type": "doctype",
"name": "Activity Cost",
"description": _("Cost of various activities"),
},
]
},
{
"label": _("Reports"),
"icon": "icon-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Daily Timesheet Summary",
"doctype": "Timesheet"
},
{
"type": "report",
"is_query_report": True,
"name": "Project wise Stock Tracking",
"doctype": "Project"
},
]
},
{
"label": _("Help"),
"icon": "icon-facetime-video",
"items": [
{
"type": "help",
"label": _("Managing Projects"),
"youtube_id": "egxIGwtoKI4"
},
]
},
]
| agpl-3.0 |
junbochen/pylearn2 | pylearn2/scripts/papers/jia_huang_wkshp_11/evaluate.py | 44 | 3208 | from __future__ import print_function
from optparse import OptionParser
import warnings
try:
from sklearn.metrics import classification_report
except ImportError:
classification_report = None
warnings.warn("couldn't find sklearn.metrics.classification_report")
try:
from sklearn.metrics import confusion_matrix
except ImportError:
confusion_matrix = None
warnings.warn("couldn't find sklearn.metrics.metrics.confusion_matrix")
from galatea.s3c.feature_loading import get_features
from pylearn2.utils import serial
from pylearn2.datasets.cifar10 import CIFAR10
from pylearn2.datasets.cifar100 import CIFAR100
import numpy as np
def test(model, X, y):
print("Evaluating svm")
y_pred = model.predict(X)
#try:
if True:
acc = (y == y_pred).mean()
print("Accuracy ",acc)
"""except:
print("something went wrong")
print('y:')
print(y)
print('y_pred:')
print(y_pred)
print('extra info')
print(type(y))
print(type(y_pred))
print(y.dtype)
print(y_pred.dtype)
print(y.shape)
print(y_pred.shape)
raise
"""
#
def get_test_labels(cifar10, cifar100, stl10):
assert cifar10 + cifar100 + stl10 == 1
if stl10:
print('loading entire stl-10 test set just to get the labels')
stl10 = serial.load("${PYLEARN2_DATA_PATH}/stl10/stl10_32x32/test.pkl")
return stl10.y
if cifar10:
print('loading entire cifar10 test set just to get the labels')
cifar10 = CIFAR10(which_set = 'test')
return np.asarray(cifar10.y)
if cifar100:
print('loading entire cifar100 test set just to get the fine labels')
cifar100 = CIFAR100(which_set = 'test')
return np.asarray(cifar100.y_fine)
assert False
def main(model_path,
test_path,
dataset,
**kwargs):
model = serial.load(model_path)
cifar100 = dataset == 'cifar100'
cifar10 = dataset == 'cifar10'
stl10 = dataset == 'stl10'
assert cifar10 + cifar100 + stl10 == 1
y = get_test_labels(cifar10, cifar100, stl10)
X = get_features(test_path, False, False)
if stl10:
num_examples = 8000
if cifar10 or cifar100:
num_examples = 10000
if not X.shape[0] == num_examples:
raise AssertionError('Expected %d examples but got %d' % (num_examples, X.shape[0]))
assert y.shape[0] == num_examples
test(model,X,y)
if __name__ == '__main__':
"""
Useful for quick tests.
Usage: python train_bilinear.py
"""
parser = OptionParser()
parser.add_option("-m", "--model",
action="store", type="string", dest="model_path")
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-o", action="store", dest="output", default = None, help="path to write the report to")
parser.add_option('--dataset', type='string', dest = 'dataset', action='store', default = None)
#(options, args) = parser.parse_args()
#assert options.output
main(model_path='final_model.pkl',
test_path='test_features.npy',
dataset = 'cifar100',
)
| bsd-3-clause |
laurentgo/pants | tests/python/pants_test/net/http/test_fetcher.py | 14 | 6703 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from contextlib import closing
import mox
import pytest
import requests
from six import StringIO
from pants.net.http.fetcher import Fetcher
from pants.util.contextutil import temporary_file
class FetcherTest(mox.MoxTestBase):
def setUp(self):
super(FetcherTest, self).setUp()
self.requests = self.mox.CreateMockAnything()
self.response = self.mox.CreateMock(requests.Response)
self.fetcher = Fetcher(requests_api=self.requests)
self.listener = self.mox.CreateMock(Fetcher.Listener)
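  # Records the mox expectations for one successful streaming GET: a 200
  # response advertising content-length 11, delivered as the returned chunks.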
def expect_get(self, url, chunk_size_bytes, timeout_secs, listener=True):
self.requests.get(url, stream=True, timeout=timeout_secs).AndReturn(self.response)
self.response.status_code = 200
self.response.headers = {'content-length': '11'}
if listener:
self.listener.status(200, content_length=11)
chunks = ['0123456789', 'a']
self.response.iter_content(chunk_size=chunk_size_bytes).AndReturn(chunks)
return chunks
def test_get(self):
for chunk in self.expect_get('http://bar', chunk_size_bytes=1024, timeout_secs=60):
self.listener.recv_chunk(chunk)
self.listener.finished()
self.response.close()
self.mox.ReplayAll()
self.fetcher.fetch('http://bar',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def test_checksum_listener(self):
digest = self.mox.CreateMockAnything()
for chunk in self.expect_get('http://baz', chunk_size_bytes=1, timeout_secs=37):
self.listener.recv_chunk(chunk)
digest.update(chunk)
self.listener.finished()
digest.hexdigest().AndReturn('42')
self.response.close()
self.mox.ReplayAll()
checksum_listener = Fetcher.ChecksumListener(digest=digest)
self.fetcher.fetch('http://baz',
checksum_listener.wrap(self.listener),
chunk_size_bytes=1,
timeout_secs=37)
self.assertEqual('42', checksum_listener.checksum)
def test_download_listener(self):
downloaded = ''
for chunk in self.expect_get('http://foo', chunk_size_bytes=1048576, timeout_secs=3600):
self.listener.recv_chunk(chunk)
downloaded += chunk
self.listener.finished()
self.response.close()
self.mox.ReplayAll()
with closing(StringIO()) as fp:
self.fetcher.fetch('http://foo',
Fetcher.DownloadListener(fp).wrap(self.listener),
chunk_size_bytes=1024 * 1024,
timeout_secs=60 * 60)
self.assertEqual(downloaded, fp.getvalue())
def test_size_mismatch(self):
self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response)
self.response.status_code = 200
self.response.headers = {'content-length': '11'}
self.listener.status(200, content_length=11)
self.response.iter_content(chunk_size=1024).AndReturn(['a', 'b'])
self.listener.recv_chunk('a')
self.listener.recv_chunk('b')
self.response.close()
self.mox.ReplayAll()
with pytest.raises(self.fetcher.Error):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def test_get_error_transient(self):
self.requests.get('http://foo', stream=True, timeout=60).AndRaise(requests.ConnectionError)
self.mox.ReplayAll()
with pytest.raises(self.fetcher.TransientError):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def test_get_error_permanent(self):
self.requests.get('http://foo', stream=True, timeout=60).AndRaise(requests.TooManyRedirects)
self.mox.ReplayAll()
with pytest.raises(self.fetcher.PermanentError) as e:
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assertTrue(e.value.response_code is None)
def test_http_error(self):
self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response)
self.response.status_code = 404
self.listener.status(404)
self.response.close()
self.mox.ReplayAll()
with pytest.raises(self.fetcher.PermanentError) as e:
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assertEqual(404, e.value.response_code)
def test_iter_content_error(self):
self.requests.get('http://foo', stream=True, timeout=60).AndReturn(self.response)
self.response.status_code = 200
self.response.headers = {}
self.listener.status(200, content_length=None)
self.response.iter_content(chunk_size=1024).AndRaise(requests.Timeout)
self.response.close()
self.mox.ReplayAll()
with pytest.raises(self.fetcher.TransientError):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
def expect_download(self, path_or_fd=None):
downloaded = ''
for chunk in self.expect_get('http://1', chunk_size_bytes=13, timeout_secs=13, listener=False):
downloaded += chunk
self.response.close()
self.mox.ReplayAll()
path = self.fetcher.download('http://1',
path_or_fd=path_or_fd,
chunk_size_bytes=13,
timeout_secs=13)
return downloaded, path
def test_download(self):
downloaded, path = self.expect_download()
try:
with open(path) as fp:
self.assertEqual(downloaded, fp.read())
finally:
os.unlink(path)
def test_download_fd(self):
with temporary_file() as fd:
downloaded, path = self.expect_download(path_or_fd=fd)
self.assertEqual(path, fd.name)
fd.close()
with open(path) as fp:
self.assertEqual(downloaded, fp.read())
def test_download_path(self):
with temporary_file() as fd:
fd.close()
downloaded, path = self.expect_download(path_or_fd=fd.name)
self.assertEqual(path, fd.name)
with open(path) as fp:
self.assertEqual(downloaded, fp.read())
| apache-2.0 |
vefimova/rally | rally/plugins/openstack/context/quotas/nova_quotas.py | 15 | 2762 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import log as logging
LOG = logging.getLogger(__name__)
class NovaQuotas(object):
"""Management of Nova quotas."""
QUOTAS_SCHEMA = {
"type": "object",
"additionalProperties": False,
"properties": {
"instances": {
"type": "integer",
"minimum": -1
},
"cores": {
"type": "integer",
"minimum": -1
},
"ram": {
"type": "integer",
"minimum": -1
},
"floating_ips": {
"type": "integer",
"minimum": -1
},
"fixed_ips": {
"type": "integer",
"minimum": -1
},
"metadata_items": {
"type": "integer",
"minimum": -1
},
"injected_files": {
"type": "integer",
"minimum": -1
},
"injected_file_content_bytes": {
"type": "integer",
"minimum": -1
},
"injected_file_path_bytes": {
"type": "integer",
"minimum": -1
},
"key_pairs": {
"type": "integer",
"minimum": -1
},
"security_groups": {
"type": "integer",
"minimum": -1
},
"security_group_rules": {
"type": "integer",
"minimum": -1
},
"server_groups": {
"type": "integer",
"minimum": -1
},
"server_group_members": {
"type": "integer",
"minimum": -1
}
}
}
def __init__(self, clients):
self.clients = clients
def update(self, tenant_id, **kwargs):
self.clients.nova().quotas.update(tenant_id, **kwargs)
def delete(self, tenant_id):
# Reset quotas to defaults and tag database objects as deleted
self.clients.nova().quotas.delete(tenant_id)
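# Illustrative usage sketch (not part of the original module; ``clients`` and
# ``tenant_id`` are assumed to come from the benchmark context):
#   quotas = NovaQuotas(clients)
#   quotas.update(tenant_id, instances=100, cores=-1, ram=512000)  # -1 = unlimited
#   quotas.delete(tenant_id)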
| apache-2.0 |
CenterForOpenScience/modular-file-renderer | mfr/extensions/tabular/libs/xlrd_tools.py | 2 | 1632 | import xlrd
from collections import OrderedDict
from ..exceptions import TableTooBigError
from ..utilities import header_population
from mfr.extensions.tabular.compat import range, basestring
def xlsx_xlrd(fp):
"""Read and convert a xlsx file to JSON format using the xlrd library
:param fp: File pointer object
:return: tuple of table headers and data
"""
max_size = 10000
wb = xlrd.open_workbook(fp.name)
sheets = OrderedDict()
for sheet in wb.sheets():
if sheet.ncols > max_size or sheet.nrows > max_size:
raise TableTooBigError('Table is too large to render.', '.xlsx',
nbr_cols=sheet.ncols, nbr_rows=sheet.nrows)
if sheet.ncols < 1 or sheet.nrows < 1:
sheets[sheet.name] = ([], [])
continue
fields = sheet.row_values(0) if sheet.nrows else []
fields = [
str(value)
if not isinstance(value, basestring) and value is not None
else value or 'Unnamed: {0}'.format(index + 1)
for index, value in enumerate(fields)
]
data = []
for i in range(1, sheet.nrows):
row = []
for cell in sheet.row(i):
if cell.ctype == xlrd.XL_CELL_DATE:
value = xlrd.xldate.xldate_as_datetime(cell.value, wb.datemode).isoformat()
else:
value = cell.value
row.append(value)
data.append(dict(zip(fields, row)))
header = header_population(fields)
sheets[sheet.name] = (header, data)
return sheets
| apache-2.0 |
sambyers/o365_fmc | .venv/lib/python3.6/site-packages/requests/packages/chardet/euckrfreq.py | 3121 | 45978 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
EUCKR_TABLE_SIZE = 2352
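# Illustrative check of the figures above (not part of the original source):
#   ideal_ratio  = 0.98653 / (1 - 0.98653)  # ~73.24, hits inside the top 512 chars
#   random_ratio = 512.0 / (2350 - 512)     # ~0.279, uniform draw over the rest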
# Char to FreqOrder table
EUCKRCharToFreqOrder = ( \
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
#Everything below is of no interest for detection purpose
2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,
2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,
2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,
2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704,
2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,
2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734,
2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,
2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,
2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,
2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793,
2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,
2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,
2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,
2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,
1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869,
2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883,
2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,
2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,
2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331,
2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945,
2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,
2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,
2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,
2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008,
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021,
3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,
3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052,
3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,
3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080,
3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,
3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110,
3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124,
3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,
3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,
3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,
3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,
3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,
3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,
3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,
3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,
3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,
3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279,
3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,
3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,
3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,
3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,
3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,
3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389,
3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405,
3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338,
3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432,
3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446,
3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191,
3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471,
3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,
1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499,
1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513,
3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525,
3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,
3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,
3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,
3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587,
3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,
3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,
3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632,
3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,
3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663,
3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,
3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,
3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583,
1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722,
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,
3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753,
3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767,
3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782,
3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796,
3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810,
3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591,
1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836,
3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851,
3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866,
3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880,
3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895,
1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905,
3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,
3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934,
3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603,
3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,
3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978,
3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993,
3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,
4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024,
4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,
1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,
4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069,
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083,
4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,
4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113,
4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610,
4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142,
4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,
4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,
4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,
4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205,
4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220,
4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234,
4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249,
4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,
4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279,
4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294,
4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,
4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,
4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341,
4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,
4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371,
4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,
4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,
4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418,
4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432,
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446,
4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461,
4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,
4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491,
4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,
4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623,
4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536,
4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551,
4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,
4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581,
4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627,
4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,
4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,
4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,
4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657,
4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,
4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687,
1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700,
4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715,
4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,
4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633,
4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758,
4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773,
4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,
4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817,
4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,
4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,
4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,
4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,
4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893,
4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,
4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923,
4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938,
4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,
4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,
4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645,
4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,
5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078,
5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,
1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042,
5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056,
5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,
5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,
5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,
5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118,
1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132,
5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,
5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,
5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,
5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206,
1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218,
5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,
5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249,
5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262,
5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,
5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,
5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308,
5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323,
5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338,
5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,
5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,
5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,
5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400,
5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415,
5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,
5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445,
5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,
5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,
5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491,
5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,
5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,
5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554,
5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,
1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,
5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600,
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615,
5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,
5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,
5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,
1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673,
5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,
5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703,
5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716,
5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729,
5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744,
1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758,
5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773,
1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786,
5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801,
5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,
5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,
5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,
5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862,
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876,
5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889,
5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,
5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687,
5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,
5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963,
5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,
5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,
5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,
6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,
6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,
6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,
6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,
6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086,
6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,
6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,
6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133,
6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147,
6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,
6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,
6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194,
6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,
6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225,
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,
6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256,
6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024
6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,
6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699,
6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,
6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,
6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347,
6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,
6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,
6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,
6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,
6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425,
6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440,
6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,
6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,
6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,
6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266,
6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,
6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,
6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,
1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,
6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,
6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,
6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,
6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644,
1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,
6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,
1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,
6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,
6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,
6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,
1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748,
6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763,
6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,
6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,
6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711,
6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,
6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840,
6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,
6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,
6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,
6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903,
6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918,
6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,
6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,
6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981,
6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996,
6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,
7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,
7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,
7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,
7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,
7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,
7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,
7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,
7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,
7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,
7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,
7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,
7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,
7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216,
7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,
7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,
7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,
7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,
7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,
7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,
7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,
7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,
7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,
7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,
7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407,
7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,
7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,
7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,
7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,
7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,
7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,
7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,
7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,
7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,
7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,
7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,
7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,
7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,
7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,
7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,
7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,
7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,
7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,
7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,
7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,
7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,
7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,
7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807,
7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,
7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,
7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,
7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,
7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,
7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,
7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,
7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,
8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,
8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,
8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,
8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,
8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,
8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,
8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,
8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,
8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,
8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,
8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,
8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,
8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,
8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,
8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,
8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,
8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,
8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,
8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,
8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,
8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,
8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,
8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,
8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,
8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,
8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,
8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,
8736,8737,8738,8739,8740,8741)
# flake8: noqa
| gpl-3.0 |
BehavioralInsightsTeam/edx-platform | common/lib/xmodule/xmodule/tests/test_editing_module.py | 13 | 2787 | """ Tests for editing descriptors"""
import unittest
import os
import logging
from mock import Mock
from pkg_resources import resource_string
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
from xmodule.editing_module import TabsEditingDescriptor
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.tests import get_test_descriptor_system
log = logging.getLogger(__name__)
class TabsEditingDescriptorTestCase(unittest.TestCase):
""" Testing TabsEditingDescriptor"""
shard = 1
def setUp(self):
super(TabsEditingDescriptorTestCase, self).setUp()
system = get_test_descriptor_system()
system.render_template = Mock(return_value="<div>Test Template HTML</div>")
self.tabs = [
{
'name': "Test_css",
'template': "tabs/codemirror-edit.html",
'current': True,
'css': {
'scss': [
resource_string(
__name__,
'../../test_files/test_tabseditingdescriptor.scss'
)
],
'css': [
resource_string(
__name__,
'../../test_files/test_tabseditingdescriptor.css'
)
]
}
},
{
'name': "Subtitles",
'template': "video/subtitles.html",
},
{
'name': "Settings",
'template': "tabs/video-metadata-edit-tab.html"
}
]
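# Attach the tab definitions at class level so that the descriptor
# constructed below picks them up.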
TabsEditingDescriptor.tabs = self.tabs
self.descriptor = system.construct_xblock_from_class(
TabsEditingDescriptor,
scope_ids=ScopeIds(None, None, None,
BlockUsageLocator(CourseLocator('org', 'course', 'run', branch='revision'),
'category', 'name')),
field_data=DictFieldData({}),
)
def test_get_css(self):
"""test get_css"""
css = self.descriptor.get_css()
test_files_dir = os.path.dirname(__file__).replace('xmodule/tests', 'test_files')
test_css_file = os.path.join(test_files_dir, 'test_tabseditingdescriptor.scss')
with open(test_css_file) as new_css:
added_css = new_css.read()
self.assertEqual(css['scss'].pop(), added_css)
self.assertEqual(css['css'].pop(), added_css)
def test_get_context(self):
"""test get_context"""
rendered_context = self.descriptor.get_context()
self.assertListEqual(rendered_context['tabs'], self.tabs)
| agpl-3.0 |
Lautitia/newfies-dialer | newfies/newfies_dialer/settings.py | 3 | 17942 | #
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
import os
import djcelery
djcelery.setup_loader()
# Django settings for project.
DEBUG = False
TEMPLATE_DEBUG = False
ADMINS = (
('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
SERVER_EMAIL = '[email protected]'
APPLICATION_DIR = os.path.dirname(globals()['__file__']) + '/../'
DATABASES = {
'default': {
# 'postgresql_psycopg2', 'postgresql', 'sqlite3', 'oracle', 'django.db.backends.mysql'
'ENGINE': 'django.db.backends.sqlite3',
# Database name or path to database file if using sqlite3.
'NAME': APPLICATION_DIR + '/database/newfies-dialer.db',
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Not used with sqlite3.
'PORT': '', # Not used with sqlite3.
# 'OPTIONS': {
# 'init_command': 'SET storage_engine=INNODB',
# }
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/django_cache',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# set use of timezone true or false
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = False
DATETIME_FORMAT = 'Y-m-d H:i:s'
DATE_FORMAT = 'Y-m-d'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(APPLICATION_DIR, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(APPLICATION_DIR, 'usermedia')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/usermedia/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# os.path.join(APPLICATION_DIR, "resources"),
("newfies", os.path.join(APPLICATION_DIR, "resources")),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#'django.contrib.staticfiles.finders.DefaultStorageFinder',
'dajaxice.finders.DajaxiceFinder',
'djangobower.finders.BowerFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ujau$^uei_ak=@-v8va(&@q_sc0^1nn*qpwyc-776n&qoam@+v'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
#'raven.contrib.django.middleware.SentryResponseErrorIdMiddleware',
#'raven.contrib.django.middleware.Sentry404CatchMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
#'pagination.middleware.PaginationMiddleware',
'linaro_django_pagination.middleware.PaginationMiddleware',
'common.filter_persist_middleware.FilterPersistMiddleware',
'audiofield.middleware.threadlocals.ThreadLocals',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.csrf",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"context_processors.newfies_version",
#needed by Sentry
"django.core.context_processors.request",
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
ROOT_URLCONF = 'newfies_dialer.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(APPLICATION_DIR, 'templates'),
)
INTERNAL_IPS = ('127.0.0.1',)
ALLOWED_HOSTS = ['127.0.0.1']
DAJAXICE_MEDIA_PREFIX = "dajaxice"
#DAJAXICE_MEDIA_PREFIX = "dajax" # http://domain.com/dajax/
#DAJAXICE_CACHE_CONTROL = 10 * 24 * 60 * 60
INSTALLED_APPS = (
#admin tool apps
'admin_tools',
'admin_tools.theming',
'admin_tools.menu',
'admin_tools.dashboard',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
# 'django.contrib.markup',
'django_countries',
'admin_tools_stats',
'genericadmin',
'mailer',
'south',
'djcelery',
'audiofield',
'tagging',
'adminsortable',
'dajaxice',
'dajax',
'dateutil',
#'pagination',
'linaro_django_pagination',
#'memcache_status',
'country_dialcode',
'common',
'sms',
'sms_module',
'dialer_contact',
'dialer_audio',
'dialer_campaign',
'dialer_cdr',
'dialer_gateway',
'dialer_settings',
'user_profile',
'notification',
'survey',
'dnc',
#'agent',
#'callcenter',
'appointment',
'mod_mailer',
#'raven.contrib.django',
'frontend_notification',
'django_nvd3',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'djangobower',
'activelink',
'bootstrap3_datetime',
)
# Django extensions
try:
import gunicorn
except ImportError:
pass
else:
INSTALLED_APPS = INSTALLED_APPS + ('gunicorn',)
# Redisboard
try:
import redisboard
except ImportError:
pass
else:
INSTALLED_APPS = INSTALLED_APPS + ('redisboard',)
# Debug Toolbar
try:
import debug_toolbar
except ImportError:
pass
else:
INSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar', )
#INSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar', 'template_timings_panel',)
MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + \
('debug_toolbar.middleware.DebugToolbarMiddleware',)
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
# StaticFilesPanel broken https://github.com/django-debug-toolbar/django-debug-toolbar/issues/503
# 'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
#'template_timings_panel.panels.TemplateTimings.TemplateTimings',
]
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'HIDE_DJANGO_SQL': False,
'ENABLE_STACKTRACES': True,
'SQL_WARNING_THRESHOLD': 100, # milliseconds
}
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# Django extensions
try:
import django_extensions
except ImportError:
pass
else:
INSTALLED_APPS = INSTALLED_APPS + ('django_extensions',)
# Nose
try:
import nose
except ImportError:
pass
else:
INSTALLED_APPS = INSTALLED_APPS + ('django_nose',)
TEST_RUNNER = 'utils.test_runner.MyRunner'
# Dilla
try:
import django_dilla
except ImportError:
pass
else:
INSTALLED_APPS = INSTALLED_APPS + ('dilla',)
# Number of records per page
#=======================
PAGE_SIZE = 10
# AUTH MODULE SETTINGS
AUTH_PROFILE_MODULE = 'user_profile.UserProfile'
#AUTH_USER_MODEL = 'user_profile.UserProfile'
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/pleaselog/'
#DILLA SETTINGS
#==============
DICTIONARY = "/usr/share/dict/words"
DILLA_USE_LOREM_IPSUM = False # set to True to ignore the dictionary
DILLA_APPS = [
'auth',
#'dialer_gateway',
'voip_app',
'dialer_campaign',
'dialer_cdr',
]
DILLA_SPAMLIBS = [
#'voip_app.voip_app_custom_spamlib',
#'dialer_campaign.dialer_campaign_custom_spamlib',
'dialer_cdr.dialer_cdr_custom_spamlib',
]
# To use Dilla
# > python manage.py run_dilla --cycles=100
#MEMCACHE
#========
#CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': '127.0.0.1:11211',
# 'KEY_PREFIX': 'newfies_',
# }
#}
#REST FRAMEWORK
#==============
REST_FRAMEWORK = {
#'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
'PAGINATE_BY': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
#'rest_framework.permissions.DjangoModelPermissions',
),
#'DEFAULT_THROTTLE_CLASSES': (
# 'rest_framework.throttling.SimpleRateThrottle',
#),
#'DEFAULT_THROTTLE_RATES': {
# 'anon': '100/day',
# 'user': '1000/day'
#}
}
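# Illustrative usage of the token authentication configured above (the
# endpoint path is hypothetical; see this project's urls.py for real ones):
# curl -H "Authorization: Token <your-token>" http://localhost:8000/rest-api/campaigns/
# A token can be created, for instance, with
# ``rest_framework.authtoken.models.Token.objects.create(user=user)``.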
#REDIS-CACHE
#===========
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': '127.0.0.1:6379',
#'OPTIONS': {
# 'DB': 1,
# 'PASSWORD': 'yadayada',
# 'PARSER_CLASS': 'redis.connection.HiredisParser'
#},
},
}
#CELERY SETTINGS
#===============
## Broker settings
BROKER_URL = "redis://localhost:6379/0"
#BROKER_URL = 'amqp://guest:guest@localhost:5672//'
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
#REDIS_CONNECT_RETRY = True
## Using the database to store task state and results.
CELERY_RESULT_BACKEND = "redis://localhost:6379/0"
CELERY_TASK_RESULT_EXPIRES = 18000 # 5 hours.
#CELERY_REDIS_CONNECT_RETRY = True
CELERY_TIMEZONE = 'Europe/Madrid'
CELERY_ENABLE_UTC = True
from kombu import Queue
CELERY_DEFAULT_QUEUE = 'default'
#Define list of Queues and their routing keys
CELERY_QUEUES = (
Queue('default', routing_key='task.#'),
Queue('sms_tasks', routing_key='sms_module.#'),
Queue('appointment', routing_key='appointment.#'),
)
CELERY_DEFAULT_EXCHANGE = 'tasks'
CELERY_DEFAULT_EXCHANGE_TYPE = 'topic'
CELERY_DEFAULT_ROUTING_KEY = 'task.default'
# python manage.py celeryd -EB -l info --purge --queue=sms_tasks
# Define tasks and which queue they will use with their routing key
CELERY_ROUTES = {
'sms_module.tasks.sms_campaign_running': {
'queue': 'sms_tasks',
'routing_key': 'sms_module.sms_campaign_running',
},
}
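# With this route, a call such as ``sms_campaign_running.delay()`` is
# published to the 'sms_tasks' queue; the celeryd example above shows how
# to run a worker bound to that queue.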
"""
from datetime import timedelta
from celery.schedules import crontab
CELERYBEAT_SCHEDULE = {
"runs-every-second": {
"task": "dialer_campaign.tasks.campaign_running",
"schedule": timedelta(seconds=1),
#"args": (50)
},
}
"""
#LANGUAGES
#===========
gettext = lambda s: s
LANGUAGES = (
('en', gettext('English')),
('fr', gettext('French')),
('es', gettext('Spanish')),
('pt', gettext('Portuguese')),
('zh', gettext('Chinese')),
('tr', gettext('Turkish')),
('ja', gettext('Japanese')),
)
LOCALE_PATHS = (
os.path.join(APPLICATION_DIR, 'locale'),
)
LANGUAGE_COOKIE_NAME = 'newfies_dialer_language'
#DJANGO-ADMIN-TOOL
#=================
ADMIN_TOOLS_MENU = 'custom_admin_tools.menu.CustomMenu'
ADMIN_TOOLS_INDEX_DASHBOARD = \
'custom_admin_tools.dashboard.CustomIndexDashboard'
ADMIN_TOOLS_APP_INDEX_DASHBOARD = \
'custom_admin_tools.dashboard.CustomAppIndexDashboard'
ADMIN_MEDIA_PREFIX = '/static/admin/'
#EMAIL BACKEND
#=============
# Use only in Debug mode. Not in production
#EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
MAILER_EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Available engines: 'dummy', 'plivo', 'twilio', 'esl'
NEWFIES_DIALER_ENGINE = 'esl'
#TASTYPIE API
#============
API_ALLOWED_IP = ['127.0.0.1', 'localhost']
#SENTRY SETTINGS
#===============
#SENTRY_DSN = 'http://asdada:asdasd@localhost:9000/1'
#DIALER
#======
MAX_CALLS_PER_SECOND = 20 # By default configured to 20 calls per second
# Frontend widget values
CHANNEL_TYPE_VALUE = 1 # 0-Keep original, 1-Mono, 2-Stereo
# 0-Keep original, 8000-8000Hz, 16000-16000Hz, 22050-22050Hz,
# 44100-44100Hz, 48000-48000Hz, 96000-96000Hz
FREQ_TYPE_VALUE = 8000
# 0-Keep original, 1-Convert to MP3, 2-Convert to WAV, 3-Convert to OGG
CONVERT_TYPE_VALUE = 2
AUDIO_DEBUG = False
#ESL
#===
ESL_HOSTNAME = '127.0.0.1'
ESL_PORT = '8021'
ESL_SECRET = 'ClueCon'
ESL_SCRIPT = '&lua(/usr/share/newfies-lua/newfies.lua)'
#TEXT-TO-SPEECH
#==============
TTS_ENGINE = 'FLITE' # FLITE, CEPSTRAL, ACAPELA
ACCOUNT_LOGIN = 'EVAL_XXXX'
APPLICATION_LOGIN = 'EVAL_XXXXXXX'
APPLICATION_PASSWORD = 'XXXXXXXX'
SERVICE_URL = 'http://vaas.acapela-group.com/Services/Synthesizer'
QUALITY = '22k' # 22k, 8k, 8ka, 8kmu
ACAPELA_GENDER = 'W'
ACAPELA_INTONATION = 'NORMAL'
#DEBUG DIALER
#============
DIALERDEBUG = False
DIALERDEBUG_PHONENUMBER = 1000
#Survey in dev
#=============
SURVEYDEV = False
AMD = False
#Demo mode
#=========
# This disables certain save operations, to avoid changing passwords
DEMO_MODE = False
#IPYTHON
#=======
IPYTHON_ARGUMENTS = [
'--ext', 'django_extensions.management.notebook_extension',
'--profile=nbserver',
'--debug'
]
#GENERAL
#=======
# PREFIX_LIMIT_MIN & PREFIX_LIMIT_MAX control how many digits of a
# phone number are matched against the dialcode prefix database
PREFIX_LIMIT_MIN = 2
PREFIX_LIMIT_MAX = 5
# List of phone number prefixes to ignore; these are removed prior to analysis
PREFIX_TO_IGNORE = "+,0,00,000,0000,00000,011,55555,99999"
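# Illustration: with PREFIX_LIMIT_MIN = 2 and PREFIX_LIMIT_MAX = 5, the
# number "34650123456" is looked up under the candidate prefixes
# "34", "346", "3465" and "34650".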
#CORS (Cross-Origin Resource Sharing)
#====================================
#if True, the whitelist will not be used and all origins will be accepted
CORS_ORIGIN_ALLOW_ALL = True
#specify a list of origin hostnames that are authorized to make a cross-site HTTP request
#CORS_ORIGIN_WHITELIST = ()
#specify a regex list of origin hostnames that are authorized to make a cross-site HTTP request
#CORS_ORIGIN_REGEX_WHITELIST = ('^http?://(\w+\.)?google\.com$', )
#specify the allowed HTTP methods that can be used when making the actual request
CORS_ALLOW_METHODS = (
'GET',
'POST',
'PUT',
'PATCH',
'DELETE',
'OPTIONS'
)
#specify which non-standard HTTP headers can be used when making the actual request
CORS_ALLOW_HEADERS = (
'x-requested-with',
'content-type',
'accept',
'origin',
'authorization',
'x-csrftoken',
)
CORS_ORIGIN_WHITELIST = (
'hostname.example.com',
)
#specify which HTTP headers are to be exposed to the browser
CORS_EXPOSE_HEADERS = ()
#specify whether or not cookies are allowed to be included
CORS_ALLOW_CREDENTIALS = False
# Django-bower
# ------------
# Specify the path to the components root (you need to use an absolute path)
BOWER_COMPONENTS_ROOT = os.path.join(APPLICATION_DIR, 'components')
BOWER_PATH = '/usr/bin/bower'
BOWER_INSTALLED_APPS = (
'jquery#2.0.3',
'jquery-ui#~1.10.3',
'bootstrap#3.0.3',
'bootstrap-switch#2.0.0',
'bootbox#4.1.0',
'd3#3.3.6',
'nvd3#1.1.12-beta',
'components-font-awesome#4.0.3',
)
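# The components listed above can then be fetched with django-bower's
# management command:
# python manage.py bower install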
# Needed to build the documentation with Django 1.6
LOGGING_CONFIG = None
# DAJAXICE setting
# Do not include XmlHttpRequest.js inside dajaxice.core.js
DAJAXICE_XMLHTTPREQUEST_JS_IMPORT = False
#IMPORT LOCAL SETTINGS
#=====================
try:
from settings_local import *
except ImportError:
pass
| mpl-2.0 |
miptliot/edx-platform | common/djangoapps/third_party_auth/migrations/0001_initial.py | 11 | 13948 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
import provider.utils
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('oauth2', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='LTIProviderConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('icon_class', models.CharField(default=b'fa-sign-in', help_text=b'The Font Awesome (or custom) icon class to use on the login button for this provider. Examples: fa-google-plus, fa-facebook, fa-linkedin, fa-sign-in, fa-university', max_length=50)),
('name', models.CharField(help_text=b'Name of this provider (shown to users)', max_length=50)),
('secondary', models.BooleanField(default=False, help_text='Secondary providers are displayed less prominently, in a separate list of "Institution" login providers.')),
('skip_registration_form', models.BooleanField(default=False, help_text='If this option is enabled, users will not be asked to confirm their details (name, email, etc.) during the registration process. Only select this option for trusted providers that are known to provide accurate user information.')),
('skip_email_verification', models.BooleanField(default=False, help_text='If this option is selected, users will not be required to confirm their email, and their account will be activated immediately upon registration.')),
('lti_consumer_key', models.CharField(help_text=b'The name that the LTI Tool Consumer will use to identify itself', max_length=255)),
('lti_hostname', models.CharField(default=b'localhost', help_text=b'The domain that will be acting as the LTI consumer.', max_length=255, db_index=True)),
('lti_consumer_secret', models.CharField(default=provider.utils.long_token, help_text=b'The shared secret that the LTI Tool Consumer will use to authenticate requests. Only this edX instance and this tool consumer instance should know this value. For increased security, you can avoid storing this in your database by leaving this field blank and setting SOCIAL_AUTH_LTI_CONSUMER_SECRETS = {"consumer key": "secret", ...} in your instance\'s Django settings (or lms.auth.json)', max_length=255, blank=True)),
('lti_max_timestamp_age', models.IntegerField(default=10, help_text=b'The maximum age of oauth_timestamp values, in seconds.')),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'verbose_name': 'Provider Configuration (LTI)',
'verbose_name_plural': 'Provider Configuration (LTI)',
},
),
migrations.CreateModel(
name='OAuth2ProviderConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('icon_class', models.CharField(default=b'fa-sign-in', help_text=b'The Font Awesome (or custom) icon class to use on the login button for this provider. Examples: fa-google-plus, fa-facebook, fa-linkedin, fa-sign-in, fa-university', max_length=50)),
('name', models.CharField(help_text=b'Name of this provider (shown to users)', max_length=50)),
('secondary', models.BooleanField(default=False, help_text='Secondary providers are displayed less prominently, in a separate list of "Institution" login providers.')),
('skip_registration_form', models.BooleanField(default=False, help_text='If this option is enabled, users will not be asked to confirm their details (name, email, etc.) during the registration process. Only select this option for trusted providers that are known to provide accurate user information.')),
('skip_email_verification', models.BooleanField(default=False, help_text='If this option is selected, users will not be required to confirm their email, and their account will be activated immediately upon registration.')),
('backend_name', models.CharField(help_text=b'Which python-social-auth OAuth2 provider backend to use. The list of backend choices is determined by the THIRD_PARTY_AUTH_BACKENDS setting.', max_length=50, db_index=True)),
('key', models.TextField(verbose_name=b'Client ID', blank=True)),
('secret', models.TextField(help_text=b'For increased security, you can avoid storing this in your database by leaving this field blank and setting SOCIAL_AUTH_OAUTH_SECRETS = {"(backend name)": "secret", ...} in your instance\'s Django settings (or lms.auth.json)', verbose_name=b'Client Secret', blank=True)),
('other_settings', models.TextField(help_text=b'Optional JSON object with advanced settings, if any.', blank=True)),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'verbose_name': 'Provider Configuration (OAuth)',
'verbose_name_plural': 'Provider Configuration (OAuth)',
},
),
migrations.CreateModel(
name='ProviderApiPermissions',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('provider_id', models.CharField(help_text=b'Uniquely identify a provider. This is different from backend_name.', max_length=255)),
('client', models.ForeignKey(to='oauth2.Client')),
],
options={
'verbose_name': 'Provider API Permission',
'verbose_name_plural': 'Provider API Permissions',
},
),
migrations.CreateModel(
name='SAMLConfiguration',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('private_key', models.TextField(help_text=b'To generate a key pair as two files, run "openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". Paste the contents of saml.key here. For increased security, you can avoid storing this in your database by leaving this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PRIVATE_KEY setting in your instance\'s Django settings (or lms.auth.json).', blank=True)),
('public_key', models.TextField(help_text=b"Public key certificate. For increased security, you can avoid storing this in your database by leaving this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PUBLIC_CERT setting in your instance's Django settings (or lms.auth.json).", blank=True)),
('entity_id', models.CharField(default=b'http://saml.example.com', max_length=255, verbose_name=b'Entity ID')),
('org_info_str', models.TextField(default=b'{"en-US": {"url": "http://www.example.com", "displayname": "Example Inc.", "name": "example"}}', help_text=b"JSON dictionary of 'url', 'displayname', and 'name' for each language", verbose_name=b'Organization Info')),
('other_config_str', models.TextField(default=b'{\n"SECURITY_CONFIG": {"metadataCacheDuration": 604800, "signMetadata": false}\n}', help_text=b'JSON object defining advanced settings that are passed on to python-saml. Valid keys that can be set here include: SECURITY_CONFIG and SP_EXTRA')),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'verbose_name': 'SAML Configuration',
'verbose_name_plural': 'SAML Configuration',
},
),
migrations.CreateModel(
name='SAMLProviderConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('icon_class', models.CharField(default=b'fa-sign-in', help_text=b'The Font Awesome (or custom) icon class to use on the login button for this provider. Examples: fa-google-plus, fa-facebook, fa-linkedin, fa-sign-in, fa-university', max_length=50)),
('name', models.CharField(help_text=b'Name of this provider (shown to users)', max_length=50)),
('secondary', models.BooleanField(default=False, help_text='Secondary providers are displayed less prominently, in a separate list of "Institution" login providers.')),
('skip_registration_form', models.BooleanField(default=False, help_text='If this option is enabled, users will not be asked to confirm their details (name, email, etc.) during the registration process. Only select this option for trusted providers that are known to provide accurate user information.')),
('skip_email_verification', models.BooleanField(default=False, help_text='If this option is selected, users will not be required to confirm their email, and their account will be activated immediately upon registration.')),
('backend_name', models.CharField(default=b'tpa-saml', help_text=b"Which python-social-auth provider backend to use. 'tpa-saml' is the standard edX SAML backend.", max_length=50)),
('idp_slug', models.SlugField(help_text=b'A short string uniquely identifying this provider. Cannot contain spaces and should be usable as a CSS class. Examples: "ubc", "mit-staging"', max_length=30)),
('entity_id', models.CharField(help_text=b'Example: https://idp.testshib.org/idp/shibboleth', max_length=255, verbose_name=b'Entity ID')),
('metadata_source', models.CharField(help_text=b"URL to this provider's XML metadata. Should be an HTTPS URL. Example: https://www.testshib.org/metadata/testshib-providers.xml", max_length=255)),
('attr_user_permanent_id', models.CharField(help_text=b'URN of the SAML attribute that we can use as a unique, persistent user ID. Leave blank for default.', max_length=128, verbose_name=b'User ID Attribute', blank=True)),
('attr_full_name', models.CharField(help_text=b"URN of SAML attribute containing the user's full name. Leave blank for default.", max_length=128, verbose_name=b'Full Name Attribute', blank=True)),
('attr_first_name', models.CharField(help_text=b"URN of SAML attribute containing the user's first name. Leave blank for default.", max_length=128, verbose_name=b'First Name Attribute', blank=True)),
('attr_last_name', models.CharField(help_text=b"URN of SAML attribute containing the user's last name. Leave blank for default.", max_length=128, verbose_name=b'Last Name Attribute', blank=True)),
('attr_username', models.CharField(help_text=b'URN of SAML attribute to use as a suggested username for this user. Leave blank for default.', max_length=128, verbose_name=b'Username Hint Attribute', blank=True)),
('attr_email', models.CharField(help_text=b"URN of SAML attribute containing the user's email address[es]. Leave blank for default.", max_length=128, verbose_name=b'Email Attribute', blank=True)),
('other_settings', models.TextField(help_text=b'For advanced use cases, enter a JSON object with additional configuration. The tpa-saml backend supports only {"requiredEntitlements": ["urn:..."]} which can be used to require the presence of a specific eduPersonEntitlement.', verbose_name=b'Advanced settings', blank=True)),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'verbose_name': 'Provider Configuration (SAML IdP)',
'verbose_name_plural': 'Provider Configuration (SAML IdPs)',
},
),
migrations.CreateModel(
name='SAMLProviderData',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('fetched_at', models.DateTimeField(db_index=True)),
('expires_at', models.DateTimeField(null=True, db_index=True)),
('entity_id', models.CharField(max_length=255, db_index=True)),
('sso_url', models.URLField(verbose_name=b'SSO URL')),
('public_key', models.TextField()),
],
options={
'ordering': ('-fetched_at',),
'verbose_name': 'SAML Provider Data',
'verbose_name_plural': 'SAML Provider Data',
},
),
]
| agpl-3.0 |
drm00/beets | beets/importer.py | 14 | 53270 | # This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides the basic, interface-agnostic workflow for importing and
autotagging music files.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import re
import pickle
import itertools
from collections import defaultdict
from tempfile import mkdtemp
from bisect import insort, bisect_left
from contextlib import contextmanager
import shutil
import time
from beets import logging
from beets import autotag
from beets import library
from beets import dbcore
from beets import plugins
from beets import util
from beets import config
from beets.util import pipeline, sorted_walk, ancestry
from beets.util import syspath, normpath, displayable_path
from enum import Enum
from beets import mediafile
action = Enum('action',
['SKIP', 'ASIS', 'TRACKS', 'MANUAL', 'APPLY', 'MANUAL_ID',
'ALBUMS'])
QUEUE_SIZE = 128
SINGLE_ARTIST_THRESH = 0.25
VARIOUS_ARTISTS = u'Various Artists'
PROGRESS_KEY = 'tagprogress'
HISTORY_KEY = 'taghistory'
# Global logger.
log = logging.getLogger('beets')
class ImportAbort(Exception):
"""Raised when the user aborts the tagging operation.
"""
pass
# Utilities.
def _open_state():
"""Reads the state file, returning a dictionary."""
try:
        with open(config['statefile'].as_filename(), 'rb') as f:
return pickle.load(f)
except Exception as exc:
# The `pickle` module can emit all sorts of exceptions during
# unpickling, including ImportError. We use a catch-all
# exception to avoid enumerating them all (the docs don't even have a
# full list!).
log.debug(u'state file could not be read: {0}', exc)
return {}
def _save_state(state):
"""Writes the state dictionary out to disk."""
try:
        with open(config['statefile'].as_filename(), 'wb') as f:
pickle.dump(state, f)
except IOError as exc:
log.error(u'state file could not be written: {0}', exc)
# Utilities for reading and writing the beets progress file, which
# allows long tagging tasks to be resumed when they pause (or crash).
def progress_read():
state = _open_state()
return state.setdefault(PROGRESS_KEY, {})
@contextmanager
def progress_write():
state = _open_state()
progress = state.setdefault(PROGRESS_KEY, {})
yield progress
_save_state(state)
def progress_add(toppath, *paths):
"""Record that the files under all of the `paths` have been imported
under `toppath`.
"""
with progress_write() as state:
imported = state.setdefault(toppath, [])
for path in paths:
# Normally `progress_add` will be called with the path
# argument increasing. This is because of the ordering in
# `albums_in_dir`. We take advantage of that to make the
# code faster
            if imported and imported[-1] <= path:
imported.append(path)
else:
insort(imported, path)
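# A hedged usage sketch (not from the original source; the paths below are
# made up). The progress helpers compose like this:
#
#     progress_add(b'/music/inbox', b'/music/inbox/album/01.mp3')
#     progress_element(b'/music/inbox', b'/music/inbox/album/01.mp3')  # True
#     progress_reset(b'/music/inbox')  # clear once the import completes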
def progress_element(toppath, path):
"""Return whether `path` has been imported in `toppath`.
"""
state = progress_read()
if toppath not in state:
return False
imported = state[toppath]
i = bisect_left(imported, path)
return i != len(imported) and imported[i] == path
def has_progress(toppath):
"""Return `True` if there exist paths that have already been
imported under `toppath`.
"""
state = progress_read()
return toppath in state
def progress_reset(toppath):
with progress_write() as state:
if toppath in state:
del state[toppath]
# Similarly, utilities for manipulating the "incremental" import log.
# This keeps track of all directories that were ever imported, which
# allows the importer to only import new stuff.
def history_add(paths):
"""Indicate that the import of the album in `paths` is completed and
should not be repeated in incremental imports.
"""
state = _open_state()
if HISTORY_KEY not in state:
state[HISTORY_KEY] = set()
state[HISTORY_KEY].add(tuple(paths))
_save_state(state)
def history_get():
"""Get the set of completed path tuples in incremental imports.
"""
state = _open_state()
if HISTORY_KEY not in state:
return set()
return state[HISTORY_KEY]
# Abstract session class.
class ImportSession(object):
"""Controls an import action. Subclasses should implement methods to
communicate with the user or otherwise make decisions.
"""
def __init__(self, lib, loghandler, paths, query):
"""Create a session. `lib` is a Library object. `loghandler` is a
logging.Handler. Either `paths` or `query` is non-null and indicates
the source of files to be imported.
"""
self.lib = lib
self.logger = self._setup_logging(loghandler)
self.paths = paths
self.query = query
self.seen_idents = set()
self._is_resuming = dict()
# Normalize the paths.
if self.paths:
self.paths = map(normpath, self.paths)
def _setup_logging(self, loghandler):
logger = logging.getLogger(__name__)
logger.propagate = False
if not loghandler:
loghandler = logging.NullHandler()
logger.handlers = [loghandler]
return logger
def set_config(self, config):
"""Set `config` property from global import config and make
implied changes.
"""
# FIXME: Maybe this function should not exist and should instead
# provide "decision wrappers" like "should_resume()", etc.
iconfig = dict(config)
self.config = iconfig
# Incremental and progress are mutually exclusive.
if iconfig['incremental']:
iconfig['resume'] = False
# When based on a query instead of directories, never
# save progress or try to resume.
if self.query is not None:
iconfig['resume'] = False
iconfig['incremental'] = False
# Copy, move, and link are mutually exclusive.
if iconfig['move']:
iconfig['copy'] = False
iconfig['link'] = False
elif iconfig['link']:
iconfig['copy'] = False
iconfig['move'] = False
# Only delete when copying.
if not iconfig['copy']:
iconfig['delete'] = False
self.want_resume = config['resume'].as_choice([True, False, 'ask'])
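    # Hedged illustration (example values, not from the source): given an
    # import config where both 'move' and 'copy' are enabled, `set_config`
    # resolves the conflict instead of erroring -- 'copy' and 'link' are
    # forced off because 'move' wins, and since 'copy' ends up False,
    # 'delete' is forced off as well. Query-based sessions additionally
    # lose 'resume' and 'incremental'.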
def tag_log(self, status, paths):
"""Log a message about a given album to the importer log. The status
should reflect the reason the album couldn't be tagged.
"""
self.logger.info(u'{0} {1}', status, displayable_path(paths))
def log_choice(self, task, duplicate=False):
"""Logs the task's current choice if it should be logged. If
``duplicate``, then this is a secondary choice after a duplicate was
detected and a decision was made.
"""
paths = task.paths
if duplicate:
# Duplicate: log all three choices (skip, keep both, and trump).
if task.should_remove_duplicates:
self.tag_log('duplicate-replace', paths)
elif task.choice_flag in (action.ASIS, action.APPLY):
self.tag_log('duplicate-keep', paths)
            elif task.choice_flag is action.SKIP:
self.tag_log('duplicate-skip', paths)
else:
# Non-duplicate: log "skip" and "asis" choices.
if task.choice_flag is action.ASIS:
self.tag_log('asis', paths)
elif task.choice_flag is action.SKIP:
self.tag_log('skip', paths)
def should_resume(self, path):
raise NotImplementedError
def choose_match(self, task):
raise NotImplementedError
def resolve_duplicate(self, task, found_duplicates):
raise NotImplementedError
def choose_item(self, task):
raise NotImplementedError
def run(self):
"""Run the import task.
"""
self.logger.info(u'import started {0}', time.asctime())
self.set_config(config['import'])
# Set up the pipeline.
if self.query is None:
stages = [read_tasks(self)]
else:
stages = [query_tasks(self)]
# In pretend mode, just log what would otherwise be imported.
if self.config['pretend']:
stages += [log_files(self)]
else:
if self.config['group_albums'] and \
not self.config['singletons']:
# Split directory tasks into one task for each album.
stages += [group_albums(self)]
if self.config['autotag']:
stages += [lookup_candidates(self), user_query(self)]
else:
stages += [import_asis(self)]
stages += [apply_choices(self)]
# Plugin stages.
for stage_func in plugins.import_stages():
stages.append(plugin_stage(self, stage_func))
stages += [manipulate_files(self)]
pl = pipeline.Pipeline(stages)
# Run the pipeline.
plugins.send('import_begin', session=self)
try:
if config['threaded']:
pl.run_parallel(QUEUE_SIZE)
else:
pl.run_sequential()
except ImportAbort:
# User aborted operation. Silently stop.
pass
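    # A rough sketch (an editorial summary, not verbatim from the docs) of
    # the stage order `run` assembles for a default autotagging import:
    #
    #     read_tasks -> [group_albums] -> lookup_candidates -> user_query
    #         -> apply_choices -> plugin stages ... -> manipulate_files
    #
    # Each stage is a coroutine; `pipeline.Pipeline` chains them and, when
    # config['threaded'] is set, runs them in parallel threads.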
# Incremental and resumed imports
def already_imported(self, toppath, paths):
"""Returns true if the files belonging to this task have already
been imported in a previous session.
"""
if self.is_resuming(toppath) \
and all(map(lambda p: progress_element(toppath, p), paths)):
return True
if self.config['incremental'] \
and tuple(paths) in self.history_dirs:
return True
return False
@property
def history_dirs(self):
if not hasattr(self, '_history_dirs'):
self._history_dirs = history_get()
return self._history_dirs
def is_resuming(self, toppath):
"""Return `True` if user wants to resume import of this path.
You have to call `ask_resume` first to determine the return value.
"""
return self._is_resuming.get(toppath, False)
def ask_resume(self, toppath):
"""If import of `toppath` was aborted in an earlier session, ask
user if she wants to resume the import.
Determines the return value of `is_resuming(toppath)`.
"""
if self.want_resume and has_progress(toppath):
# Either accept immediately or prompt for input to decide.
if self.want_resume is True or \
self.should_resume(toppath):
log.warn(u'Resuming interrupted import of {0}',
util.displayable_path(toppath))
self._is_resuming[toppath] = True
else:
# Clear progress; we're starting from the top.
progress_reset(toppath)
# The importer task class.
class BaseImportTask(object):
"""An abstract base class for importer tasks.
Tasks flow through the importer pipeline. Each stage can update
them. """
def __init__(self, toppath, paths, items):
"""Create a task. The primary fields that define a task are:
* `toppath`: The user-specified base directory that contains the
music for this task. If the task has *no* user-specified base
(for example, when importing based on an -L query), this can
be None. This is used for tracking progress and history.
* `paths`: A list of *specific* paths where the music for this task
came from. These paths can be directories, when their entire
contents are being imported, or files, when the task comprises
individual tracks. This is used for progress/history tracking and
for displaying the task to the user.
* `items`: A list of `Item` objects representing the music being
imported.
These fields should not change after initialization.
"""
self.toppath = toppath
self.paths = paths
self.items = items
class ImportTask(BaseImportTask):
"""Represents a single set of items to be imported along with its
intermediate state. May represent an album or a single item.
The import session and stages call the following methods in the
given order.
* `lookup_candidates()` Sets the `common_artist`, `common_album`,
`candidates`, and `rec` attributes. `candidates` is a list of
`AlbumMatch` objects.
* `choose_match()` Uses the session to set the `match` attribute
from the `candidates` list.
* `find_duplicates()` Returns a list of albums from `lib` with the
same artist and album name as the task.
* `apply_metadata()` Sets the attributes of the items from the
task's `match` attribute.
* `add()` Add the imported items and album to the database.
* `manipulate_files()` Copy, move, and write files depending on the
session configuration.
* `finalize()` Update the import progress and cleanup the file
system.
"""
def __init__(self, toppath, paths, items):
super(ImportTask, self).__init__(toppath, paths, items)
self.choice_flag = None
self.cur_album = None
self.cur_artist = None
self.candidates = []
self.rec = None
self.should_remove_duplicates = False
self.is_album = True
def set_choice(self, choice):
"""Given an AlbumMatch or TrackMatch object or an action constant,
indicates that an action has been selected for this task.
"""
# Not part of the task structure:
assert choice not in (action.MANUAL, action.MANUAL_ID)
assert choice != action.APPLY # Only used internally.
if choice in (action.SKIP, action.ASIS, action.TRACKS, action.ALBUMS):
self.choice_flag = choice
self.match = None
else:
self.choice_flag = action.APPLY # Implicit choice.
self.match = choice
def save_progress(self):
"""Updates the progress state to indicate that this album has
finished.
"""
if self.toppath:
progress_add(self.toppath, *self.paths)
def save_history(self):
"""Save the directory in the history for incremental imports.
"""
if self.paths:
history_add(self.paths)
# Logical decisions.
@property
def apply(self):
return self.choice_flag == action.APPLY
@property
def skip(self):
return self.choice_flag == action.SKIP
# Convenient data.
def chosen_ident(self):
"""Returns identifying metadata about the current choice. For
albums, this is an (artist, album) pair. For items, this is
(artist, title). May only be called when the choice flag is ASIS
(in which case the data comes from the files' current metadata)
or APPLY (data comes from the choice).
"""
if self.choice_flag is action.ASIS:
return (self.cur_artist, self.cur_album)
elif self.choice_flag is action.APPLY:
return (self.match.info.artist, self.match.info.album)
def imported_items(self):
"""Return a list of Items that should be added to the library.
        If the task applies an album match, the method only returns the
        matched items.
"""
if self.choice_flag == action.ASIS:
return list(self.items)
elif self.choice_flag == action.APPLY:
return self.match.mapping.keys()
else:
assert False
def apply_metadata(self):
"""Copy metadata from match info to the items.
"""
autotag.apply_metadata(self.match.info, self.match.mapping)
def duplicate_items(self, lib):
duplicate_items = []
for album in self.find_duplicates(lib):
duplicate_items += album.items()
return duplicate_items
def remove_duplicates(self, lib):
duplicate_items = self.duplicate_items(lib)
log.debug(u'removing {0} old duplicated items', len(duplicate_items))
for item in duplicate_items:
item.remove()
if lib.directory in util.ancestry(item.path):
log.debug(u'deleting duplicate {0}',
util.displayable_path(item.path))
util.remove(item.path)
util.prune_dirs(os.path.dirname(item.path),
lib.directory)
def finalize(self, session):
"""Save progress, clean up files, and emit plugin event.
"""
# Update progress.
if session.want_resume:
self.save_progress()
if session.config['incremental']:
self.save_history()
self.cleanup(copy=session.config['copy'],
delete=session.config['delete'],
move=session.config['move'])
if not self.skip:
self._emit_imported(session.lib)
def cleanup(self, copy=False, delete=False, move=False):
"""Remove and prune imported paths.
"""
# Do not delete any files or prune directories when skipping.
if self.skip:
return
items = self.imported_items()
# When copying and deleting originals, delete old files.
if copy and delete:
new_paths = [os.path.realpath(item.path) for item in items]
for old_path in self.old_paths:
# Only delete files that were actually copied.
if old_path not in new_paths:
util.remove(syspath(old_path), False)
self.prune(old_path)
# When moving, prune empty directories containing the original files.
elif move:
for old_path in self.old_paths:
self.prune(old_path)
def _emit_imported(self, lib):
plugins.send('album_imported', lib=lib, album=self.album)
def handle_created(self, session):
"""Send the `import_task_created` event for this task. Return a list of
tasks that should continue through the pipeline. By default, this is a
list containing only the task itself, but plugins can replace the task
with new ones.
"""
tasks = plugins.send('import_task_created', session=session, task=self)
if not tasks:
tasks = [self]
else:
# The plugins gave us a list of lists of tasks. Flatten it.
tasks = [t for inner in tasks for t in inner]
return tasks
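    # Hedged sketch (hypothetical plugin, registration API elided): a
    # listener for the `import_task_created` event receives (session, task)
    # and returns a list of replacement tasks. If one listener returns
    # [task_a, task_b], the flattening above makes `handle_created` fan
    # this task out into both.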
def lookup_candidates(self):
"""Retrieve and store candidates for this album.
"""
artist, album, candidates, recommendation = \
autotag.tag_album(self.items)
self.cur_artist = artist
self.cur_album = album
self.candidates = candidates
self.rec = recommendation
def find_duplicates(self, lib):
"""Return a list of albums from `lib` with the same artist and
album name as the task.
"""
artist, album = self.chosen_ident()
if artist is None:
# As-is import with no artist. Skip check.
return []
duplicates = []
task_paths = set(i.path for i in self.items if i)
duplicate_query = dbcore.AndQuery((
dbcore.MatchQuery('albumartist', artist),
dbcore.MatchQuery('album', album),
))
for album in lib.albums(duplicate_query):
# Check whether the album is identical in contents, in which
# case it is not a duplicate (will be replaced).
album_paths = set(i.path for i in album.items())
if album_paths != task_paths:
duplicates.append(album)
return duplicates
def align_album_level_fields(self):
"""Make the some album fields equal across `self.items`
"""
changes = {}
if self.choice_flag == action.ASIS:
# Taking metadata "as-is". Guess whether this album is VA.
plur_albumartist, freq = util.plurality(
[i.albumartist or i.artist for i in self.items]
)
if freq == len(self.items) or \
(freq > 1 and
float(freq) / len(self.items) >= SINGLE_ARTIST_THRESH):
# Single-artist album.
changes['albumartist'] = plur_albumartist
changes['comp'] = False
else:
# VA.
changes['albumartist'] = VARIOUS_ARTISTS
changes['comp'] = True
elif self.choice_flag == action.APPLY:
# Applying autotagged metadata. Just get AA from the first
# item.
if not self.items[0].albumartist:
changes['albumartist'] = self.items[0].artist
if not self.items[0].mb_albumartistid:
changes['mb_albumartistid'] = self.items[0].mb_artistid
# Apply new metadata.
for item in self.items:
item.update(changes)
def manipulate_files(self, move=False, copy=False, write=False,
link=False, session=None):
items = self.imported_items()
# Save the original paths of all items for deletion and pruning
# in the next step (finalization).
self.old_paths = [item.path for item in items]
for item in items:
if move or copy or link:
# In copy and link modes, treat re-imports specially:
# move in-library files. (Out-of-library files are
# copied/moved as usual).
old_path = item.path
if (copy or link) and self.replaced_items[item] and \
session.lib.directory in util.ancestry(old_path):
item.move()
# We moved the item, so remove the
# now-nonexistent file from old_paths.
self.old_paths.remove(old_path)
else:
# A normal import. Just copy files and keep track of
# old paths.
item.move(copy, link)
if write and self.apply:
item.try_write()
with session.lib.transaction():
for item in self.imported_items():
item.store()
plugins.send('import_task_files', session=session, task=self)
def add(self, lib):
"""Add the items as an album to the library and remove replaced items.
"""
self.align_album_level_fields()
with lib.transaction():
self.record_replaced(lib)
self.remove_replaced(lib)
self.album = lib.add_album(self.imported_items())
self.reimport_metadata(lib)
def record_replaced(self, lib):
"""Records the replaced items and albums in the `replaced_items`
and `replaced_albums` dictionaries.
"""
self.replaced_items = defaultdict(list)
self.replaced_albums = defaultdict(list)
replaced_album_ids = set()
for item in self.imported_items():
dup_items = list(lib.items(
dbcore.query.BytesQuery('path', item.path)
))
self.replaced_items[item] = dup_items
for dup_item in dup_items:
if (not dup_item.album_id or
dup_item.album_id in replaced_album_ids):
continue
replaced_album = dup_item.get_album()
if replaced_album:
replaced_album_ids.add(dup_item.album_id)
self.replaced_albums[replaced_album.path] = replaced_album
def reimport_metadata(self, lib):
"""For reimports, preserves metadata for reimported items and
albums.
"""
if self.is_album:
replaced_album = self.replaced_albums.get(self.album.path)
if replaced_album:
self.album.added = replaced_album.added
self.album.update(replaced_album._values_flex)
self.album.store()
log.debug(
u'Reimported album: added {0}, flexible '
u'attributes {1} from album {2} for {3}',
self.album.added,
replaced_album._values_flex.keys(),
replaced_album.id,
displayable_path(self.album.path)
)
for item in self.imported_items():
dup_items = self.replaced_items[item]
for dup_item in dup_items:
if dup_item.added and dup_item.added != item.added:
item.added = dup_item.added
log.debug(
u'Reimported item added {0} '
u'from item {1} for {2}',
item.added,
dup_item.id,
displayable_path(item.path)
)
item.update(dup_item._values_flex)
log.debug(
u'Reimported item flexible attributes {0} '
u'from item {1} for {2}',
dup_item._values_flex.keys(),
dup_item.id,
displayable_path(item.path)
)
item.store()
def remove_replaced(self, lib):
"""Removes all the items from the library that have the same
path as an item from this task.
"""
for item in self.imported_items():
for dup_item in self.replaced_items[item]:
log.debug(u'Replacing item {0}: {1}',
dup_item.id, displayable_path(item.path))
dup_item.remove()
log.debug(u'{0} of {1} items replaced',
sum(bool(l) for l in self.replaced_items.values()),
len(self.imported_items()))
def choose_match(self, session):
"""Ask the session which match should apply and apply it.
"""
choice = session.choose_match(self)
self.set_choice(choice)
session.log_choice(self)
def reload(self):
"""Reload albums and items from the database.
"""
for item in self.imported_items():
item.load()
self.album.load()
# Utilities.
def prune(self, filename):
"""Prune any empty directories above the given file. If this
task has no `toppath` or the file path provided is not within
the `toppath`, then this function has no effect. Similarly, if
the file still exists, no pruning is performed, so it's safe to
call when the file in question may not have been removed.
"""
if self.toppath and not os.path.exists(filename):
util.prune_dirs(os.path.dirname(filename),
self.toppath,
clutter=config['clutter'].as_str_seq())
class SingletonImportTask(ImportTask):
"""ImportTask for a single track that is not associated to an album.
"""
def __init__(self, toppath, item):
super(SingletonImportTask, self).__init__(toppath, [item.path], [item])
self.item = item
self.is_album = False
self.paths = [item.path]
def chosen_ident(self):
assert self.choice_flag in (action.ASIS, action.APPLY)
if self.choice_flag is action.ASIS:
return (self.item.artist, self.item.title)
elif self.choice_flag is action.APPLY:
return (self.match.info.artist, self.match.info.title)
def imported_items(self):
return [self.item]
def apply_metadata(self):
autotag.apply_item_metadata(self.item, self.match.info)
def _emit_imported(self, lib):
for item in self.imported_items():
plugins.send('item_imported', lib=lib, item=item)
def lookup_candidates(self):
candidates, recommendation = autotag.tag_item(self.item)
self.candidates = candidates
self.rec = recommendation
def find_duplicates(self, lib):
"""Return a list of items from `lib` that have the same artist
and title as the task.
"""
artist, title = self.chosen_ident()
found_items = []
query = dbcore.AndQuery((
dbcore.MatchQuery('artist', artist),
dbcore.MatchQuery('title', title),
))
for other_item in lib.items(query):
# Existing items not considered duplicates.
if other_item.path != self.item.path:
found_items.append(other_item)
return found_items
duplicate_items = find_duplicates
def add(self, lib):
with lib.transaction():
self.record_replaced(lib)
self.remove_replaced(lib)
lib.add(self.item)
self.reimport_metadata(lib)
def infer_album_fields(self):
raise NotImplementedError
def choose_match(self, session):
"""Ask the session which match should apply and apply it.
"""
choice = session.choose_item(self)
self.set_choice(choice)
session.log_choice(self)
def reload(self):
self.item.load()
# FIXME The inheritance relationships are inverted. This is why there
# are so many methods which pass. More responsibility should be delegated to
# the BaseImportTask class.
class SentinelImportTask(ImportTask):
"""A sentinel task marks the progress of an import and does not
import any items itself.
    If only `toppath` is set, the task indicates the end of a top-level
directory import. If the `paths` argument is also given, the task
indicates the progress in the `toppath` import.
"""
def __init__(self, toppath, paths):
super(SentinelImportTask, self).__init__(toppath, paths, ())
# TODO Remove the remaining attributes eventually
self.should_remove_duplicates = False
self.is_album = True
self.choice_flag = None
def save_history(self):
pass
def save_progress(self):
if self.paths is None:
# "Done" sentinel.
progress_reset(self.toppath)
else:
# "Directory progress" sentinel for singletons
progress_add(self.toppath, *self.paths)
def skip(self):
return True
def set_choice(self, choice):
raise NotImplementedError
def cleanup(self, **kwargs):
pass
def _emit_imported(self, session):
pass
class ArchiveImportTask(SentinelImportTask):
"""An import task that represents the processing of an archive.
`toppath` must be a `zip`, `tar`, or `rar` archive. Archive tasks
serve two purposes:
- First, it will unarchive the files to a temporary directory and
return it. The client should read tasks from the resulting
directory and send them through the pipeline.
- Second, it will clean up the temporary directory when it proceeds
through the pipeline. The client should send the archive task
after sending the rest of the music tasks to make this work.
"""
def __init__(self, toppath):
super(ArchiveImportTask, self).__init__(toppath, ())
self.extracted = False
@classmethod
def is_archive(cls, path):
"""Returns true if the given path points to an archive that can
be handled.
"""
if not os.path.isfile(path):
return False
for path_test, _ in cls.handlers():
if path_test(path):
return True
return False
@classmethod
def handlers(cls):
"""Returns a list of archive handlers.
Each handler is a `(path_test, ArchiveClass)` tuple. `path_test`
is a function that returns `True` if the given path can be
handled by `ArchiveClass`. `ArchiveClass` is a class that
implements the same interface as `tarfile.TarFile`.
"""
if not hasattr(cls, '_handlers'):
cls._handlers = []
from zipfile import is_zipfile, ZipFile
cls._handlers.append((is_zipfile, ZipFile))
from tarfile import is_tarfile, TarFile
cls._handlers.append((is_tarfile, TarFile))
try:
from rarfile import is_rarfile, RarFile
except ImportError:
pass
else:
cls._handlers.append((is_rarfile, RarFile))
return cls._handlers
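    # Hedged sketch (hypothetical, not a documented extension point):
    # because handlers are plain (path_test, ArchiveClass) pairs, any class
    # with a TarFile-like interface could in principle be registered the
    # same way, e.g. with the third-party py7zr module:
    #
    #     from py7zr import is_7zfile, SevenZipFile
    #     cls._handlers.append((is_7zfile, SevenZipFile))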
def cleanup(self, **kwargs):
"""Removes the temporary directory the archive was extracted to.
"""
if self.extracted:
log.debug(u'Removing extracted directory: {0}',
displayable_path(self.toppath))
shutil.rmtree(self.toppath)
def extract(self):
"""Extracts the archive to a temporary directory and sets
`toppath` to that directory.
"""
for path_test, handler_class in self.handlers():
if path_test(self.toppath):
break
try:
extract_to = mkdtemp()
archive = handler_class(self.toppath, mode='r')
archive.extractall(extract_to)
finally:
archive.close()
self.extracted = True
self.toppath = extract_to
class ImportTaskFactory(object):
"""Generate album and singleton import tasks for all media files
indicated by a path.
"""
def __init__(self, toppath, session):
"""Create a new task factory.
`toppath` is the user-specified path to search for music to
import. `session` is the `ImportSession`, which controls how
tasks are read from the directory.
"""
self.toppath = toppath
self.session = session
self.skipped = 0 # Skipped due to incremental/resume.
self.imported = 0 # "Real" tasks created.
self.is_archive = ArchiveImportTask.is_archive(syspath(toppath))
def tasks(self):
"""Yield all import tasks for music found in the user-specified
path `self.toppath`. Any necessary sentinel tasks are also
produced.
During generation, update `self.skipped` and `self.imported`
with the number of tasks that were not produced (due to
incremental mode or resumed imports) and the number of concrete
tasks actually produced, respectively.
If `self.toppath` is an archive, it is adjusted to point to the
extracted data.
"""
# Check whether this is an archive.
if self.is_archive:
archive_task = self.unarchive()
if not archive_task:
return
# Search for music in the directory.
for dirs, paths in self.paths():
if self.session.config['singletons']:
for path in paths:
tasks = self._create(self.singleton(path))
for task in tasks:
yield task
yield self.sentinel(dirs)
else:
tasks = self._create(self.album(paths, dirs))
for task in tasks:
yield task
# Produce the final sentinel for this toppath to indicate that
# it is finished. This is usually just a SentinelImportTask, but
# for archive imports, send the archive task instead (to remove
# the extracted directory).
if self.is_archive:
yield archive_task
else:
yield self.sentinel()
def _create(self, task):
"""Handle a new task to be emitted by the factory.
Emit the `import_task_created` event and increment the
`imported` count if the task is not skipped. Return the same
task. If `task` is None, do nothing.
"""
if task:
tasks = task.handle_created(self.session)
self.imported += len(tasks)
return tasks
return []
def paths(self):
"""Walk `self.toppath` and yield `(dirs, files)` pairs where
`files` are individual music files and `dirs` the set of
containing directories where the music was found.
        This can either be a recursive search in the ordinary case, a
        single track when `toppath` is a file, or a single directory in
        `flat` mode.
"""
if not os.path.isdir(syspath(self.toppath)):
yield [self.toppath], [self.toppath]
elif self.session.config['flat']:
paths = []
for dirs, paths_in_dir in albums_in_dir(self.toppath):
paths += paths_in_dir
yield [self.toppath], paths
else:
for dirs, paths in albums_in_dir(self.toppath):
yield dirs, paths
def singleton(self, path):
"""Return a `SingletonImportTask` for the music file.
"""
if self.session.already_imported(self.toppath, [path]):
log.debug(u'Skipping previously-imported path: {0}',
displayable_path(path))
self.skipped += 1
return None
item = self.read_item(path)
if item:
return SingletonImportTask(self.toppath, item)
else:
return None
def album(self, paths, dirs=None):
"""Return a `ImportTask` with all media files from paths.
`dirs` is a list of parent directories used to record already
imported albums.
"""
if not paths:
return None
if dirs is None:
dirs = list(set(os.path.dirname(p) for p in paths))
if self.session.already_imported(self.toppath, dirs):
log.debug(u'Skipping previously-imported path: {0}',
displayable_path(dirs))
self.skipped += 1
return None
items = map(self.read_item, paths)
items = [item for item in items if item]
if items:
return ImportTask(self.toppath, dirs, items)
else:
return None
def sentinel(self, paths=None):
"""Return a `SentinelImportTask` indicating the end of a
top-level directory import.
"""
return SentinelImportTask(self.toppath, paths)
def unarchive(self):
"""Extract the archive for this `toppath`.
Extract the archive to a new directory, adjust `toppath` to
point to the extracted directory, and return an
`ArchiveImportTask`. If extraction fails, return None.
"""
assert self.is_archive
if not (self.session.config['move'] or
self.session.config['copy']):
log.warn(u"Archive importing requires either "
"'copy' or 'move' to be enabled.")
return
log.debug(u'Extracting archive: {0}',
displayable_path(self.toppath))
archive_task = ArchiveImportTask(self.toppath)
try:
archive_task.extract()
except Exception as exc:
log.error(u'extraction failed: {0}', exc)
return
# Now read albums from the extracted directory.
self.toppath = archive_task.toppath
log.debug(u'Archive extracted to: {0}', self.toppath)
return archive_task
def read_item(self, path):
"""Return an `Item` read from the path.
If an item cannot be read, return `None` instead and log an
error.
"""
try:
return library.Item.from_path(path)
except library.ReadError as exc:
if isinstance(exc.reason, mediafile.FileTypeError):
# Silently ignore non-music files.
pass
elif isinstance(exc.reason, mediafile.UnreadableFileError):
log.warn(u'unreadable file: {0}', displayable_path(path))
else:
log.error(u'error reading {0}: {1}',
displayable_path(path), exc)
# Full-album pipeline stages.
def read_tasks(session):
"""A generator yielding all the albums (as ImportTask objects) found
in the user-specified list of paths. In the case of a singleton
import, yields single-item tasks instead.
"""
skipped = 0
for toppath in session.paths:
# Check whether we need to resume the import.
session.ask_resume(toppath)
# Generate tasks.
task_factory = ImportTaskFactory(toppath, session)
for t in task_factory.tasks():
yield t
skipped += task_factory.skipped
if not task_factory.imported:
log.warn(u'No files imported from {0}',
displayable_path(toppath))
# Show skipped directories (due to incremental/resume).
if skipped:
log.info(u'Skipped {0} paths.', skipped)
def query_tasks(session):
"""A generator that works as a drop-in-replacement for read_tasks.
Instead of finding files from the filesystem, a query is used to
match items from the library.
"""
if session.config['singletons']:
# Search for items.
for item in session.lib.items(session.query):
task = SingletonImportTask(None, item)
for task in task.handle_created(session):
yield task
else:
# Search for albums.
for album in session.lib.albums(session.query):
log.debug(u'yielding album {0}: {1} - {2}',
album.id, album.albumartist, album.album)
items = list(album.items())
# Clear IDs from re-tagged items so they appear "fresh" when
# we add them back to the library.
for item in items:
item.id = None
item.album_id = None
task = ImportTask(None, [album.item_dir()], items)
for task in task.handle_created(session):
yield task
@pipeline.mutator_stage
def lookup_candidates(session, task):
"""A coroutine for performing the initial MusicBrainz lookup for an
album. It accepts lists of Items and yields
(items, cur_artist, cur_album, candidates, rec) tuples. If no match
is found, all of the yielded parameters (except items) are None.
"""
if task.skip:
# FIXME This gets duplicated a lot. We need a better
# abstraction.
return
plugins.send('import_task_start', session=session, task=task)
log.debug(u'Looking up: {0}', displayable_path(task.paths))
task.lookup_candidates()
@pipeline.stage
def user_query(session, task):
"""A coroutine for interfacing with the user about the tagging
process.
    The coroutine accepts an ImportTask object. It uses the
    session's `choose_match` method to determine the `action` for
    this task. Depending on the action, additional stages are executed
    and the processed task is yielded.
    It emits the ``import_task_choice`` event for plugins. Plugins have
    access to the choice via the ``task.choice_flag`` property and may
    choose to change it.
"""
if task.skip:
return task
# Ask the user for a choice.
task.choose_match(session)
plugins.send('import_task_choice', session=session, task=task)
# As-tracks: transition to singleton workflow.
if task.choice_flag is action.TRACKS:
# Set up a little pipeline for dealing with the singletons.
def emitter(task):
for item in task.items:
task = SingletonImportTask(task.toppath, item)
for new_task in task.handle_created(session):
yield new_task
yield SentinelImportTask(task.toppath, task.paths)
ipl = pipeline.Pipeline([
emitter(task),
lookup_candidates(session),
user_query(session),
])
return pipeline.multiple(ipl.pull())
# As albums: group items by albums and create task for each album
if task.choice_flag is action.ALBUMS:
ipl = pipeline.Pipeline([
iter([task]),
group_albums(session),
lookup_candidates(session),
user_query(session)
])
return pipeline.multiple(ipl.pull())
resolve_duplicates(session, task)
return task
def resolve_duplicates(session, task):
"""Check if a task conflicts with items or albums already imported
and ask the session to resolve this.
"""
if task.choice_flag in (action.ASIS, action.APPLY):
ident = task.chosen_ident()
found_duplicates = task.find_duplicates(session.lib)
if ident in session.seen_idents or found_duplicates:
session.resolve_duplicate(task, found_duplicates)
session.log_choice(task, True)
session.seen_idents.add(ident)
@pipeline.mutator_stage
def import_asis(session, task):
"""Select the `action.ASIS` choice for all tasks.
This stage replaces the initial_lookup and user_query stages
when the importer is run without autotagging.
"""
if task.skip:
return
log.info('{}', displayable_path(task.paths))
task.set_choice(action.ASIS)
@pipeline.mutator_stage
def apply_choices(session, task):
"""A coroutine for applying changes to albums and singletons during
the autotag process.
"""
if task.skip:
return
# Change metadata.
if task.apply:
task.apply_metadata()
plugins.send('import_task_apply', session=session, task=task)
task.add(session.lib)
@pipeline.mutator_stage
def plugin_stage(session, func, task):
"""A coroutine (pipeline stage) that calls the given function with
each non-skipped import task. These stages occur between applying
metadata changes and moving/copying/writing files.
"""
if task.skip:
return
func(session, task)
# Stage may modify DB, so re-load cached item data.
# FIXME Importer plugins should not modify the database but instead
# the albums and items attached to tasks.
task.reload()
@pipeline.stage
def manipulate_files(session, task):
"""A coroutine (pipeline stage) that performs necessary file
manipulations *after* items have been added to the library and
finalizes each task.
"""
if not task.skip:
if task.should_remove_duplicates:
task.remove_duplicates(session.lib)
task.manipulate_files(
move=session.config['move'],
copy=session.config['copy'],
write=session.config['write'],
link=session.config['link'],
session=session,
)
# Progress, cleanup, and event.
task.finalize(session)
@pipeline.stage
def log_files(session, task):
"""A coroutine (pipeline stage) to log each file to be imported.
"""
if isinstance(task, SingletonImportTask):
log.info(u'Singleton: {0}', displayable_path(task.item['path']))
elif task.items:
log.info(u'Album: {0}', displayable_path(task.paths[0]))
for item in task.items:
log.info(u' {0}', displayable_path(item['path']))
def group_albums(session):
"""A pipeline stage that groups the items of each task into albums
using their metadata.
Groups are identified using their artist and album fields. The
pipeline stage emits new album tasks for each discovered group.
"""
def group(item):
return (item.albumartist or item.artist, item.album)
task = None
while True:
task = yield task
if task.skip:
continue
tasks = []
sorted_items = sorted(task.items, key=group)
for _, items in itertools.groupby(sorted_items, group):
items = list(items)
task = ImportTask(task.toppath, [i.path for i in items],
items)
tasks += task.handle_created(session)
tasks.append(SentinelImportTask(task.toppath, task.paths))
task = pipeline.multiple(tasks)
MULTIDISC_MARKERS = (r'dis[ck]', r'cd')
MULTIDISC_PAT_FMT = r'^(.*%s[\W_]*)\d'
def albums_in_dir(path):
"""Recursively searches the given directory and returns an iterable
    of (paths, items) where paths is a list of directories and items is
    a list of music file paths that probably form an album. Specifically,
    any folder
containing any media files is an album.
"""
collapse_pat = collapse_paths = collapse_items = None
ignore = config['ignore'].as_str_seq()
for root, dirs, files in sorted_walk(path, ignore=ignore, logger=log):
items = [os.path.join(root, f) for f in files]
# If we're currently collapsing the constituent directories in a
# multi-disc album, check whether we should continue collapsing
# and add the current directory. If so, just add the directory
# and move on to the next directory. If not, stop collapsing.
if collapse_paths:
if (not collapse_pat and collapse_paths[0] in ancestry(root)) or \
(collapse_pat and
collapse_pat.match(os.path.basename(root))):
# Still collapsing.
collapse_paths.append(root)
collapse_items += items
continue
else:
# Collapse finished. Yield the collapsed directory and
# proceed to process the current one.
if collapse_items:
yield collapse_paths, collapse_items
collapse_pat = collapse_paths = collapse_items = None
# Check whether this directory looks like the *first* directory
# in a multi-disc sequence. There are two indicators: the file
# is named like part of a multi-disc sequence (e.g., "Title Disc
# 1") or it contains no items but only directories that are
# named in this way.
start_collapsing = False
for marker in MULTIDISC_MARKERS:
marker_pat = re.compile(MULTIDISC_PAT_FMT % marker, re.I)
match = marker_pat.match(os.path.basename(root))
# Is this directory the root of a nested multi-disc album?
if dirs and not items:
# Check whether all subdirectories have the same prefix.
start_collapsing = True
subdir_pat = None
for subdir in dirs:
# The first directory dictates the pattern for
# the remaining directories.
if not subdir_pat:
match = marker_pat.match(subdir)
if match:
subdir_pat = re.compile(
br'^%s\d' % re.escape(match.group(1)), re.I
)
else:
start_collapsing = False
break
# Subsequent directories must match the pattern.
elif not subdir_pat.match(subdir):
start_collapsing = False
break
# If all subdirectories match, don't check other
# markers.
if start_collapsing:
break
# Is this directory the first in a flattened multi-disc album?
elif match:
start_collapsing = True
# Set the current pattern to match directories with the same
# prefix as this one, followed by a digit.
collapse_pat = re.compile(
br'^%s\d' % re.escape(match.group(1)), re.I
)
break
# If either of the above heuristics indicated that this is the
# beginning of a multi-disc album, initialize the collapsed
# directory and item lists and check the next directory.
if start_collapsing:
# Start collapsing; continue to the next iteration.
collapse_paths = [root]
collapse_items = items
continue
# If it's nonempty, yield it.
if items:
yield [root], items
# Clear out any unfinished collapse.
if collapse_paths and collapse_items:
yield collapse_paths, collapse_items
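# A worked example (made-up layout) of the collapsing logic above:
#
#     Album/
#         CD 1/01.mp3 ...
#         CD 2/01.mp3 ...
#
# "CD 1" matches a MULTIDISC_MARKERS pattern, so the parent and both disc
# directories are yielded together as a single (paths, items) pair rather
# than as two separate albums.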
| mit |
alexbruy/QGIS | python/plugins/processing/algs/qgis/RectanglesOvalsDiamondsVariable.py | 1 | 12223 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RectanglesOvalsDiamondsVariable.py
---------------------
Date : April 2016
Copyright : (C) 2016 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive323
__revision__ = '$Format:%H$'
import os
import math
from qgis.PyQt.QtGui import QIcon
from qgis.core import QgsWkbTypes, QgsFeature, QgsGeometry, QgsPoint
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingLog import ProcessingLog
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class RectanglesOvalsDiamondsVariable(GeoAlgorithm):
INPUT_LAYER = 'INPUT_LAYER'
SHAPE = 'SHAPE'
WIDTH = 'WIDTH'
HEIGHT = 'HEIGHT'
ROTATION = 'ROTATION'
SEGMENTS = 'SEGMENTS'
OUTPUT_LAYER = 'OUTPUT_LAYER'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Rectangles, ovals, diamonds (variable)')
self.group, self.i18n_group = self.trAlgorithm('Vector geometry tools')
self.shapes = [self.tr('Rectangles'), self.tr('Diamonds'), self.tr('Ovals')]
self.addParameter(ParameterVector(self.INPUT_LAYER,
self.tr('Input layer'),
[ParameterVector.VECTOR_TYPE_POINT]))
self.addParameter(ParameterSelection(self.SHAPE,
self.tr('Buffer shape'), self.shapes))
self.addParameter(ParameterTableField(self.WIDTH,
self.tr('Width field'),
self.INPUT_LAYER,
ParameterTableField.DATA_TYPE_NUMBER))
self.addParameter(ParameterTableField(self.HEIGHT,
self.tr('Height field'),
self.INPUT_LAYER,
ParameterTableField.DATA_TYPE_NUMBER))
self.addParameter(ParameterTableField(self.ROTATION,
self.tr('Rotation field'),
self.INPUT_LAYER,
ParameterTableField.DATA_TYPE_NUMBER,
True))
self.addParameter(ParameterNumber(self.SEGMENTS,
self.tr('Number of segments'),
1,
999999999,
36))
self.addOutput(OutputVector(self.OUTPUT_LAYER,
self.tr('Output')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT_LAYER))
shape = self.getParameterValue(self.SHAPE)
width = self.getParameterValue(self.WIDTH)
height = self.getParameterValue(self.HEIGHT)
rotation = self.getParameterValue(self.ROTATION)
segments = self.getParameterValue(self.SEGMENTS)
writer = self.getOutputFromName(
self.OUTPUT_LAYER).getVectorWriter(
layer.fields().toList(),
QgsWkbTypes.Polygon,
layer.crs())
outFeat = QgsFeature()
features = vector.features(layer)
total = 100.0 / len(features)
if shape == 0:
self.rectangles(writer, features, width, height, rotation)
elif shape == 1:
self.diamonds(writer, features, width, height, rotation)
else:
self.ovals(writer, features, width, height, rotation, segments)
del writer
def rectangles(self, writer, features, width, height, rotation):
ft = QgsFeature()
if rotation is not None:
for current, feat in enumerate(features):
w = feat[width]
h = feat[height]
angle = feat[rotation]
if not w or not h or not angle:
ProcessingLog.addToLog(ProcessingLog.LOG_WARNING,
self.tr('Feature {} has empty '
'width, height or angle. '
'Skipping...'.format(feat.id())))
continue
xOffset = w / 2.0
yOffset = h / 2.0
phi = angle * math.pi / 180
point = feat.geometry().asPoint()
x = point.x()
y = point.y()
points = [(-xOffset, -yOffset), (-xOffset, yOffset), (xOffset, yOffset), (xOffset, -yOffset)]
polygon = [[QgsPoint(i[0] * math.cos(phi) + i[1] * math.sin(phi) + x,
-i[0] * math.sin(phi) + i[1] * math.cos(phi) + y) for i in points]]
ft.setGeometry(QgsGeometry.fromPolygon(polygon))
ft.setAttributes(feat.attributes())
writer.addFeature(ft)
else:
for current, feat in enumerate(features):
w = feat[width]
h = feat[height]
if not w or not h:
ProcessingLog.addToLog(ProcessingLog.LOG_WARNING,
self.tr('Feature {} has empty '
'width or height. '
'Skipping...'.format(feat.id())))
continue
xOffset = w / 2.0
yOffset = h / 2.0
point = feat.geometry().asPoint()
x = point.x()
y = point.y()
points = [(-xOffset, -yOffset), (-xOffset, yOffset), (xOffset, yOffset), (xOffset, -yOffset)]
polygon = [[QgsPoint(i[0] + x, i[1] + y) for i in points]]
ft.setGeometry(QgsGeometry.fromPolygon(polygon))
ft.setAttributes(feat.attributes())
writer.addFeature(ft)
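    # A worked example (made-up numbers) of the rotation used above. The
    # corner offsets are rotated clockwise by phi:
    #
    #     x' = x*cos(phi) + y*sin(phi)
    #     y' = -x*sin(phi) + y*cos(phi)
    #
    # With width=4, height=2 and a 90 degree rotation, the corner
    # (-2.0, -1.0) maps to (-1.0, 2.0) before being translated to the
    # feature's point. The same transform is applied in diamonds() and
    # ovals() below.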
def diamonds(self, writer, features, width, height, rotation):
ft = QgsFeature()
if rotation is not None:
for current, feat in enumerate(features):
w = feat[width]
h = feat[height]
angle = feat[rotation]
if not w or not h or not angle:
ProcessingLog.addToLog(ProcessingLog.LOG_WARNING,
self.tr('Feature {} has empty '
'width, height or angle. '
'Skipping...'.format(feat.id())))
continue
xOffset = w / 2.0
yOffset = h / 2.0
phi = angle * math.pi / 180
point = feat.geometry().asPoint()
x = point.x()
y = point.y()
points = [(0.0, -yOffset), (-xOffset, 0.0), (0.0, yOffset), (xOffset, 0.0)]
polygon = [[QgsPoint(i[0] * math.cos(phi) + i[1] * math.sin(phi) + x,
-i[0] * math.sin(phi) + i[1] * math.cos(phi) + y) for i in points]]
ft.setGeometry(QgsGeometry.fromPolygon(polygon))
ft.setAttributes(feat.attributes())
writer.addFeature(ft)
else:
for current, feat in enumerate(features):
w = feat[width]
h = feat[height]
if not w or not h:
ProcessingLog.addToLog(ProcessingLog.LOG_WARNING,
self.tr('Feature {} has empty '
'width or height. '
'Skipping...'.format(feat.id())))
continue
xOffset = w / 2.0
yOffset = h / 2.0
point = feat.geometry().asPoint()
x = point.x()
y = point.y()
points = [(0.0, -yOffset), (-xOffset, 0.0), (0.0, yOffset), (xOffset, 0.0)]
polygon = [[QgsPoint(i[0] + x, i[1] + y) for i in points]]
ft.setGeometry(QgsGeometry.fromPolygon(polygon))
ft.setAttributes(feat.attributes())
writer.addFeature(ft)
def ovals(self, writer, features, width, height, rotation, segments):
ft = QgsFeature()
if rotation is not None:
for current, feat in enumerate(features):
w = feat[width]
h = feat[height]
angle = feat[rotation]
if not w or not h or not angle:
ProcessingLog.addToLog(ProcessingLog.LOG_WARNING,
self.tr('Feature {} has empty '
'width, height or angle. '
'Skipping...'.format(feat.id())))
continue
xOffset = w / 2.0
yOffset = h / 2.0
phi = angle * math.pi / 180
point = feat.geometry().asPoint()
x = point.x()
y = point.y()
points = []
for t in [(2 * math.pi) / segments * i for i in xrange(segments)]:
points.append((xOffset * math.cos(t), yOffset * math.sin(t)))
polygon = [[QgsPoint(i[0] * math.cos(phi) + i[1] * math.sin(phi) + x,
-i[0] * math.sin(phi) + i[1] * math.cos(phi) + y) for i in points]]
ft.setGeometry(QgsGeometry.fromPolygon(polygon))
ft.setAttributes(feat.attributes())
writer.addFeature(ft)
else:
for current, feat in enumerate(features):
w = feat[width]
h = feat[height]
if not w or not h:
ProcessingLog.addToLog(ProcessingLog.LOG_WARNING,
self.tr('Feature {} has empty '
'width or height. '
'Skipping...'.format(feat.id())))
continue
xOffset = w / 2.0
yOffset = h / 2.0
point = feat.geometry().asPoint()
x = point.x()
y = point.y()
points = []
for t in [(2 * math.pi) / segments * i for i in xrange(segments)]:
points.append((xOffset * math.cos(t), yOffset * math.sin(t)))
polygon = [[QgsPoint(i[0] + x, i[1] + y) for i in points]]
ft.setGeometry(QgsGeometry.fromPolygon(polygon))
ft.setAttributes(feat.attributes())
writer.addFeature(ft)
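    # Illustrative note (example values, not from the source): the loop
    # above samples the parametric ellipse at t = 2*pi*i/segments, so
    # segments=4 yields t in {0, pi/2, pi, 3*pi/2} -- four points that
    # reduce the oval to a diamond, while larger values give a smoother
    # outline.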
| gpl-2.0 |
antonve/s4-project-mooc | lms/djangoapps/bulk_email/migrations/0010_auto__chg_field_optout_course_id__add_field_courseemail_template_name_.py | 120 | 8430 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Optout.course_id'
db.alter_column('bulk_email_optout', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255))
# Adding field 'CourseEmail.template_name'
db.add_column('bulk_email_courseemail', 'template_name',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True),
keep_default=False)
# Adding field 'CourseEmail.from_addr'
db.add_column('bulk_email_courseemail', 'from_addr',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True),
keep_default=False)
# Changing field 'CourseEmail.course_id'
db.alter_column('bulk_email_courseemail', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255))
# Adding field 'CourseEmailTemplate.name'
db.add_column('bulk_email_courseemailtemplate', 'name',
self.gf('django.db.models.fields.CharField')(max_length=255, unique=True, null=True),
keep_default=False)
# Changing field 'CourseAuthorization.course_id'
db.alter_column('bulk_email_courseauthorization', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(unique=True, max_length=255))
def backwards(self, orm):
# Changing field 'Optout.course_id'
db.alter_column('bulk_email_optout', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255))
# Deleting field 'CourseEmail.template_name'
db.delete_column('bulk_email_courseemail', 'template_name')
# Deleting field 'CourseEmail.from_addr'
db.delete_column('bulk_email_courseemail', 'from_addr')
# Changing field 'CourseEmail.course_id'
db.alter_column('bulk_email_courseemail', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255))
# Deleting field 'CourseEmailTemplate.name'
db.delete_column('bulk_email_courseemailtemplate', 'name')
# Changing field 'CourseAuthorization.course_id'
db.alter_column('bulk_email_courseauthorization', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'bulk_email.courseauthorization': {
'Meta': {'object_name': 'CourseAuthorization'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'email_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'bulk_email.courseemail': {
'Meta': {'object_name': 'CourseEmail'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'from_addr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'html_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'text_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'to_option': ('django.db.models.fields.CharField', [], {'default': "'myself'", 'max_length': '64'})
},
'bulk_email.courseemailtemplate': {
'Meta': {'object_name': 'CourseEmailTemplate'},
'html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
'plain_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'bulk_email.optout': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'Optout'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bulk_email']
| agpl-3.0 |
drtuxwang/system-config | bin/fls.py | 1 | 5121 | #!/usr/bin/env python3
"""
Show full list of files.
"""
import argparse
import glob
import os
import signal
import sys
from typing import Iterator, List, Union
import file_mod
class Options:
"""
Options class
"""
def __init__(self) -> None:
self._args: argparse.Namespace = None
self.parse(sys.argv)
def get_files(self) -> List[str]:
"""
Return list of files.
"""
return self._files
def get_order(self) -> str:
"""
Return display order.
"""
return self._args.order
def get_recursive_flag(self) -> bool:
"""
Return recursive flag.
"""
return self._args.recursive_flag
def get_reverse_flag(self) -> bool:
"""
Return reverse flag.
"""
return self._args.reverse_flag
def _parse_args(self, args: List[str]) -> None:
parser = argparse.ArgumentParser(
description='Show full list of files.',
)
parser.add_argument(
'-R',
dest='recursive_flag',
action='store_true',
help='Show directories recursively.'
)
parser.add_argument(
'-s',
action='store_const',
const='size',
dest='order',
default='name',
help='Sort by size of file.'
)
parser.add_argument(
'-t',
action='store_const',
const='mtime',
dest='order',
default='name',
help='Sort by modification time of file.'
)
parser.add_argument(
'-c',
action='store_const',
const='ctime',
dest='order',
default='name',
            help='Sort by metadata change time of file.'
)
parser.add_argument(
'-r',
dest='reverse_flag',
action='store_true',
help='Reverse order.'
)
parser.add_argument(
'files',
nargs='*',
metavar='file',
help='File or directory.'
)
self._args = parser.parse_args(args)
def parse(self, args: List[str]) -> None:
"""
Parse arguments
"""
self._parse_args(args[1:])
if self._args.files:
self._files = self._args.files
else:
self._files = sorted(os.listdir())
class Main:
"""
Main class
"""
def __init__(self) -> None:
try:
self.config()
sys.exit(self.run())
except (EOFError, KeyboardInterrupt):
sys.exit(114)
except SystemExit as exception:
            sys.exit(exception.code)
@staticmethod
def config() -> None:
"""
Configure program
"""
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if os.name == 'nt':
argv = []
for arg in sys.argv:
files = glob.glob(arg) # Fixes Windows globbing bug
if files:
argv.extend(files)
else:
argv.append(arg)
sys.argv = argv
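    # Illustrative note (hypothetical invocation, not from the original
    # source): on Windows the shell passes wildcards through unexpanded,
    # so config() expands them here, e.g.
    #   fls.py *.py   ->   sys.argv becomes ['fls.py', 'a.py', 'b.py']
    # On POSIX shells the globbing has already happened before Python runs.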
def _list(self, options: Options, files: List[str]) -> None:
file_stats = []
for file in files:
if os.path.islink(file):
file_stats.append(file_mod.FileStat(file, size=0))
elif os.path.isdir(file):
file_stats.append(file_mod.FileStat(file + os.sep))
elif os.path.isfile(file):
file_stats.append(file_mod.FileStat(file))
for file_stat in self._sorted(options, file_stats):
print("{0:10d} [{1:s}] {2:s}".format(
file_stat.get_size(),
file_stat.get_time_local(),
file_stat.get_file()
))
if (options.get_recursive_flag() and
file_stat.get_file().endswith(os.sep)):
self._list(options, sorted(
glob.glob(file_stat.get_file() + '.*') +
glob.glob(file_stat.get_file() + '*')
))
@staticmethod
def _sorted(
options: Options,
file_stats: List[file_mod.FileStat],
) -> Union[Iterator[file_mod.FileStat], List[file_mod.FileStat]]:
order = options.get_order()
if order == 'ctime':
file_stats = sorted(file_stats, key=lambda s: s.get_time_change())
elif order == 'mtime':
file_stats = sorted(file_stats, key=lambda s: s.get_time())
elif order == 'size':
file_stats = sorted(file_stats, key=lambda s: s.get_size())
if options.get_reverse_flag():
return reversed(file_stats)
return file_stats
def run(self) -> int:
"""
Start program
"""
options = Options()
self._list(options, options.get_files())
return 0
if __name__ == '__main__':
if '--pydoc' in sys.argv:
help(__name__)
else:
Main()
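# A minimal usage sketch (assuming this script and the local `file_mod`
# helper module are on the path; the timestamp format below is illustrative,
# since it depends on file_mod.FileStat.get_time_local()):
#
#   $ fls.py -R -t /tmp/project
#         5121 [2021-01-01-12:00:00] /tmp/project/fls.py
#          312 [2021-01-02-08:30:00] /tmp/project/notes.txt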
| gpl-2.0 |
stelfrich/openmicroscopy | components/tools/OmeroWeb/omeroweb/settings.py | 1 | 40171 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # Django settings for OMERO.web project. # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#
# Copyright (c) 2008-2014 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
import os.path
import sys
import platform
import logging
import omero
import omero.config
import omero.clients
import tempfile
import re
import json
from omero_ext import portalocker
logger = logging.getLogger(__name__)
# LOGS
# NEVER DEPLOY a site into production with DEBUG turned on.
# Debuging mode.
# A boolean that turns on/off debug mode.
# handler404 and handler500 works only when False
if 'OMERO_HOME' in os.environ:
OMERO_HOME = os.environ.get('OMERO_HOME')
else:
OMERO_HOME = os.path.join(os.path.dirname(__file__), '..', '..', '..')
OMERO_HOME = os.path.normpath(OMERO_HOME)
INSIGHT_JARS = os.path.join(OMERO_HOME, "lib", "insight").replace('\\', '/')
WEBSTART = False
if os.path.isdir(INSIGHT_JARS):
WEBSTART = True
# Logging
LOGDIR = os.path.join(OMERO_HOME, 'var', 'log').replace('\\', '/')
if not os.path.isdir(LOGDIR):
try:
os.makedirs(LOGDIR)
except Exception, x:
exctype, value = sys.exc_info()[:2]
raise exctype(value)
# DEBUG: Never deploy a site into production with DEBUG turned on.
# Logging levels: logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR
# logging.CRITICAL
# FORMAT: 2010-01-01 00:00:00,000 INFO [omeroweb.webadmin.webadmin_utils]
# (proc.1308 ) getGuestConnection:20 Open connection is not available
STANDARD_LOGFORMAT = (
'%(asctime)s %(levelname)5.5s [%(name)40.40s]'
' (proc.%(process)5.5d) %(funcName)s:%(lineno)d %(message)s')
if platform.system() in ("Windows",):
LOGGING_CLASS = 'logging.handlers.RotatingFileHandler'
else:
LOGGING_CLASS = 'omero_ext.cloghandler.ConcurrentRotatingFileHandler'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': STANDARD_LOGFORMAT
},
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': LOGGING_CLASS,
'filename': os.path.join(
LOGDIR, 'OMEROweb.log').replace('\\', '/'),
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 10,
'formatter': 'standard',
},
'request_handler': {
'level': 'DEBUG',
'class': LOGGING_CLASS,
'filename': os.path.join(
LOGDIR, 'OMEROweb_request.log').replace('\\', '/'),
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 10,
'formatter': 'standard',
},
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': { # Stop SQL debug from logging to main logger
'handlers': ['request_handler', 'mail_admins'],
'level': 'DEBUG',
'propagate': False
},
'django': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': True
},
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
}
}
}
# Load custom settings from etc/grid/config.xml
# Tue 2 Nov 2010 11:03:18 GMT -- ticket:3228
from omero.util.concurrency import get_event
CONFIG_XML = os.path.join(OMERO_HOME, 'etc', 'grid', 'config.xml')
count = 10
event = get_event("websettings")
while True:
try:
CUSTOM_SETTINGS = dict()
if os.path.exists(CONFIG_XML):
CONFIG_XML = omero.config.ConfigXml(CONFIG_XML, read_only=True)
CUSTOM_SETTINGS = CONFIG_XML.as_map()
CONFIG_XML.close()
break
except portalocker.LockException:
# logger.error("Exception while loading configuration retrying...",
# exc_info=True)
exctype, value = sys.exc_info()[:2]
count -= 1
if not count:
raise exctype(value)
else:
event.wait(1) # Wait a total of 10 seconds
except:
# logger.error("Exception while loading configuration...",
# exc_info=True)
exctype, value = sys.exc_info()[:2]
raise exctype(value)
del event
del count
del get_event
WSGI = "wsgi"
WSGITCP = "wsgi-tcp"
WSGI_TYPES = (WSGI, WSGITCP)
FASTCGITCP = "fastcgi-tcp"
FASTCGI_TYPES = (FASTCGITCP, )
DEVELOPMENT = "development"
DEFAULT_SERVER_TYPE = FASTCGITCP
ALL_SERVER_TYPES = (WSGI, WSGITCP, FASTCGITCP, DEVELOPMENT)
DEFAULT_SESSION_ENGINE = 'omeroweb.filesessionstore'
SESSION_ENGINE_VALUES = ('omeroweb.filesessionstore',
'django.contrib.sessions.backends.db',
'django.contrib.sessions.backends.file',
'django.contrib.sessions.backends.cache',
'django.contrib.sessions.backends.cached_db')
def parse_boolean(s):
s = s.strip().lower()
if s in ('true', '1', 't'):
return True
return False
def parse_paths(s):
return [os.path.normpath(path) for path in json.loads(s)]
def check_server_type(s):
if s not in ALL_SERVER_TYPES:
raise ValueError(
"Unknown server type: %s. Valid values are: %s"
% (s, ALL_SERVER_TYPES))
return s
def check_session_engine(s):
if s not in SESSION_ENGINE_VALUES:
raise ValueError(
"Unknown session engine: %s. Valid values are: %s"
% (s, SESSION_ENGINE_VALUES))
return s
def identity(x):
return x
def str_slash(s):
if s is not None:
s = str(s)
if s and not s.endswith("/"):
s += "/"
return s
class LeaveUnset(Exception):
pass
def leave_none_unset(s):
if s is None:
raise LeaveUnset()
return s
def leave_none_unset_int(s):
s = leave_none_unset(s)
if s is not None:
return int(s)
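# Illustrative behaviour of the parsers above (hedged examples; the input
# values are invented, not taken from a real config.xml):
#   parse_boolean('True')        -> True
#   parse_boolean('0')           -> False
#   str_slash('/static')         -> '/static/'
#   check_server_type('wsgi')    -> 'wsgi'
#   check_server_type('bogus')   -> raises ValueError
#   leave_none_unset(None)       -> raises LeaveUnset (setting stays unset)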
CUSTOM_HOST = CUSTOM_SETTINGS.get("Ice.Default.Host", "localhost")
# DO NOT EDIT!
INTERNAL_SETTINGS_MAPPING = {
"omero.qa.feedback":
["FEEDBACK_URL", "http://qa.openmicroscopy.org.uk", str, None],
"omero.web.upgrades.url":
["UPGRADES_URL", None, leave_none_unset, None],
"omero.web.check_version":
["CHECK_VERSION", "true", parse_boolean, None],
# Allowed hosts:
# https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
"omero.web.allowed_hosts":
["ALLOWED_HOSTS", '["*"]', json.loads, None],
# WEBSTART
"omero.web.webstart_template":
["WEBSTART_TEMPLATE", None, identity, None],
"omero.web.webstart_jar":
["WEBSTART_JAR", "omero.insight.jar", str, None],
"omero.web.webstart_icon":
["WEBSTART_ICON", "webstart/img/icon-omero-insight.png", str, None],
"omero.web.webstart_heap":
["WEBSTART_HEAP", "1024m", str, None],
"omero.web.webstart_host":
["WEBSTART_HOST", CUSTOM_HOST, str, None],
"omero.web.webstart_port":
["WEBSTART_PORT", "4064", str, None],
"omero.web.webstart_class":
["WEBSTART_CLASS", "org.openmicroscopy.shoola.Main", str, None],
"omero.web.webstart_title":
["WEBSTART_TITLE", "OMERO.insight", str, None],
"omero.web.webstart_vendor":
["WEBSTART_VENDOR", "The Open Microscopy Environment", str, None],
"omero.web.webstart_homepage":
["WEBSTART_HOMEPAGE", "http://www.openmicroscopy.org", str, None],
"omero.web.webstart_admins_only":
["WEBSTART_ADMINS_ONLY", "false", parse_boolean, None],
# Internal email notification for omero.web.admins,
# loaded from config.xml directly
"omero.mail.from":
["SERVER_EMAIL",
None,
identity,
("The email address that error messages come from, such as those"
" sent to :property:`omero.web.admins`. Requires EMAIL properties"
" below.")],
"omero.mail.host":
["EMAIL_HOST",
None,
identity,
"The SMTP server host to use for sending email."],
"omero.mail.password":
["EMAIL_HOST_PASSWORD",
None,
identity,
"Password to use for the SMTP server."],
"omero.mail.username":
["EMAIL_HOST_USER",
None,
identity,
"Username to use for the SMTP server."],
"omero.mail.port":
["EMAIL_PORT",
25,
identity,
"Port to use for the SMTP server."],
"omero.web.admins.email_subject_prefix":
["EMAIL_SUBJECT_PREFIX",
"[OMERO.web - admin notification]",
str,
"Subject-line prefix for email messages"],
"omero.mail.smtp.starttls.enable":
["EMAIL_USE_TLS",
"false",
parse_boolean,
("Whether to use a TLS (secure) connection when talking to the SMTP"
" server.")],
}
CUSTOM_SETTINGS_MAPPINGS = {
# Deployment configuration
"omero.web.debug":
["DEBUG",
"false",
parse_boolean,
"A boolean that turns on/off debug mode."],
"omero.web.admins":
["ADMINS",
'[]',
json.loads,
("A list of people who get code error notifications whenever the "
"application identifies a broken link or raises an unhandled "
"exception that results in an internal server error. This gives "
"the administrators immediate notification of any errors, "
"see :doc:`/sysadmins/mail`. "
"Example:``'[[\"Full Name\", \"email address\"]]'``.")],
"omero.web.application_server":
["APPLICATION_SERVER",
DEFAULT_SERVER_TYPE,
check_server_type,
("OMERO.web is configured to use FastCGI TCP by default. If you are "
"using a non-standard web server configuration you may wish to "
"change this before generating your web server configuration. "
"Available options: \"fastcgi-tcp\", \"wsgi-tcp\", \"wsgi\"")],
"omero.web.application_server.host":
["APPLICATION_SERVER_HOST",
"127.0.0.1",
str,
"Upstream application host"],
"omero.web.application_server.port":
["APPLICATION_SERVER_PORT", "4080", str, "Upstream application port"],
"omero.web.application_server.max_requests":
["APPLICATION_SERVER_MAX_REQUESTS", 400, int, None],
"omero.web.prefix":
["FORCE_SCRIPT_NAME",
None,
leave_none_unset,
("Used as the value of the SCRIPT_NAME environment variable in any"
" HTTP request.")],
"omero.web.use_x_forwarded_host":
["USE_X_FORWARDED_HOST",
"false",
parse_boolean,
("Specifies whether to use the X-Forwarded-Host header in preference "
"to the Host header. This should only be enabled if a proxy which "
"sets this header is in use.")],
"omero.web.static_url":
["STATIC_URL",
"/static/",
str_slash,
("URL to use when referring to static files. Example: ``'/static/'``"
" or ``'http://static.example.com/'``. Used as the base path for"
" asset definitions (the Media class) and the staticfiles app. It"
" must end in a slash if set to a non-empty value.")],
"omero.web.session_engine":
["SESSION_ENGINE",
DEFAULT_SESSION_ENGINE,
check_session_engine,
("Controls where Django stores session data. See :djangodoc:"
"`Configuring the session engine for more details <ref/settings"
"/#session-engine>`.")],
"omero.web.session_expire_at_browser_close":
["SESSION_EXPIRE_AT_BROWSER_CLOSE",
"true",
parse_boolean,
("A boolean that determines whether to expire the session when the "
"user closes their browser. See :djangodoc:`Django Browser-length "
"sessions vs. persistent sessions documentation <topics/http/"
"sessions/#browser-length-vs-persistent-sessions>` for more "
"details.")],
"omero.web.caches":
["CACHES",
('{"default": {"BACKEND":'
' "django.core.cache.backends.dummy.DummyCache"}}'),
json.loads,
("OMERO.web offers alternative session backends to automatically"
" delete stale data using the cache session store backend, see "
":djangodoc:`Django cached session documentation <topics/http/"
"sessions/#using-cached-sessions>` for more details.")],
"omero.web.session_cookie_age":
["SESSION_COOKIE_AGE",
86400,
int,
"The age of session cookies, in seconds."],
"omero.web.session_cookie_domain":
["SESSION_COOKIE_DOMAIN",
None,
leave_none_unset,
"The domain to use for session cookies"],
"omero.web.session_cookie_name":
["SESSION_COOKIE_NAME",
None,
leave_none_unset,
"The name to use for session cookies"],
"omero.web.logdir":
["LOGDIR", LOGDIR, str, "A path to the custom log directory."],
# Public user
"omero.web.public.enabled":
["PUBLIC_ENABLED",
"false",
parse_boolean,
"Enable and disable the OMERO.web public user functionality."],
"omero.web.public.url_filter":
["PUBLIC_URL_FILTER",
r'^/(?!webadmin)',
re.compile,
("Set a URL filter for which the OMERO.web public user is allowed to"
" navigate. The idea is that you can create the public pages"
" yourself (see OMERO.web framework since we do not provide public"
" pages.")],
"omero.web.public.server_id":
["PUBLIC_SERVER_ID", 1, int, "Server to authenticate against."],
"omero.web.public.user":
["PUBLIC_USER",
None,
leave_none_unset,
"Username to use during authentication."],
"omero.web.public.password":
["PUBLIC_PASSWORD",
None,
leave_none_unset,
"Password to use during authentication."],
"omero.web.public.cache.enabled":
["PUBLIC_CACHE_ENABLED", "false", parse_boolean, None],
"omero.web.public.cache.key":
["PUBLIC_CACHE_KEY", "omero.web.public.cache.key", str, None],
"omero.web.public.cache.timeout":
["PUBLIC_CACHE_TIMEOUT", 60 * 60 * 24, int, None],
# Application configuration
"omero.web.server_list":
["SERVER_LIST",
'[["%s", 4064, "omero"]]' % CUSTOM_HOST,
json.loads,
"A list of servers the Web client can connect to."],
"omero.web.ping_interval":
["PING_INTERVAL", 60000, int, "description"],
"omero.web.webgateway_cache":
["WEBGATEWAY_CACHE", None, leave_none_unset, None],
# VIEWER
# the following parameters configure when to show/hide the 'Volume viewer'
# icon in the Image metadata panel
"omero.web.open_astex_max_side":
["OPEN_ASTEX_MAX_SIDE", 400, int, None],
"omero.web.open_astex_min_side":
["OPEN_ASTEX_MIN_SIDE", 20, int, None],
"omero.web.open_astex_max_voxels":
["OPEN_ASTEX_MAX_VOXELS", 27000000, int, None], # 300 x 300 x 300
# PIPELINE 1.3.20
# Pipeline is an asset packaging library for Django, providing both CSS
# and JavaScript concatenation and compression, built-in JavaScript
# template support, and optional data-URI image and font embedding.
"omero.web.pipeline_js_compressor":
["PIPELINE_JS_COMPRESSOR",
None,
identity,
("Compressor class to be applied to JavaScript files. If empty or "
"None, JavaScript files won't be compressed.")],
"omero.web.pipeline_css_compressor":
["PIPELINE_CSS_COMPRESSOR",
None,
identity,
("Compressor class to be applied to CSS files. If empty or None,"
" CSS files won't be compressed.")],
"omero.web.pipeline_staticfile_storage":
["STATICFILES_STORAGE",
"pipeline.storage.PipelineStorage",
str,
("The file storage engine to use when collecting static files with"
" the collectstatic management command. See `the documentation "
"<http://django-pipeline.readthedocs.org/en/latest/storages.html>`_"
" for more details.")],
# Customisation
"omero.web.login_logo":
["LOGIN_LOGO",
None,
leave_none_unset,
("Customize webclient login page with your own logo. Logo images "
"should ideally be 150 pixels high or less and will appear above "
"the OMERO logo. You will need to host the image somewhere else "
"and link to it with"
" ``\"http://www.openmicroscopy.org/site/logo.jpg\"``.")],
"omero.web.login_view":
["LOGIN_VIEW", "weblogin", str, None],
"omero.web.staticfile_dirs":
["STATICFILES_DIRS",
'[]',
json.loads,
("Defines the additional locations the staticfiles app will traverse"
" if the FileSystemFinder finder is enabled, e.g. if you use the"
" collectstatic or findstatic management command or use the static"
" file serving view.")],
"omero.web.template_dirs":
["TEMPLATE_DIRS",
'[]',
json.loads,
("List of locations of the template source files, in search order. "
"Note that these paths should use Unix-style forward slashes, even"
" on Windows.")],
"omero.web.index_template":
["INDEX_TEMPLATE",
None,
identity,
("Define template used as an index page ``http://your_host/omero/``."
"If None user is automatically redirected to the login page."
"For example use 'webstart/start.html'. ")],
"omero.web.login_redirect":
["LOGIN_REDIRECT",
'{}',
json.loads,
("Redirect to the given location after logging in. It only supports "
"arguments for :djangodoc:`Django reverse function"
" <ref/urlresolvers/#django.core.urlresolvers.reverse>`. "
"For example: ``'{\"redirect\": [\"webindex\"], \"viewname\":"
" \"load_template\", \"args\":[\"userdata\"], \"query_string\":"
" \"experimenter=-1\"}'``")],
"omero.web.apps":
["ADDITIONAL_APPS",
'[]',
json.loads,
("Add additional Django applications. For example, see"
" :doc:`/developers/Web/CreateApp`")],
"omero.web.databases":
["DATABASES", '{}', json.loads, None],
"omero.web.page_size":
["PAGE",
200,
int,
("Number of images displayed within a dataset or 'orphaned'"
" container to prevent from loading them all at once.")],
"omero.web.ui.top_links":
["TOP_LINKS",
('['
'["Data", "webindex", {"title": "Browse Data via Projects, Tags'
' etc"}],'
'["History", "history", {"title": "History"}],'
'["Help", "http://help.openmicroscopy.org/",'
'{"title":"Open OMERO user guide in a new tab", "target":"new"}]'
']'),
json.loads,
("Add links to the top header: links are ``['Link Text', 'link',"
" options]``, where "
"the url is reverse('link') OR simply 'link' (for external urls). "
"E.g. ``'[[\"Webtest\", \"webtest_index\"], [\"Homepage\","
" \"http://...\", {\"title\": \"Homepage\", \"target\": \"new\"}"
" ]]'``")],
"omero.web.ui.right_plugins":
["RIGHT_PLUGINS",
('[["Acquisition",'
' "webclient/data/includes/right_plugin.acquisition.js.html",'
' "metadata_tab"],'
# '["ROIs", "webtest/webclient_plugins/right_plugin.rois.js.html",
# "image_roi_tab"],'
'["Preview", "webclient/data/includes/right_plugin.preview.js.html"'
', "preview_tab"]]'),
json.loads,
("Add plugins to the right-hand panel. "
"Plugins are ``['Label', 'include.js', 'div_id']``. "
"The javascript loads data into ``$('#div_id')``.")],
"omero.web.ui.center_plugins":
["CENTER_PLUGINS",
('['
# '["Split View",
# "webtest/webclient_plugins/center_plugin.splitview.js.html",
# "split_view_panel"],'
']'),
json.loads,
("Add plugins to the center panels. Plugins are "
"``['Channel overlay',"
" 'webtest/webclient_plugins/center_plugin.overlay.js.html',"
" 'channel_overlay_panel']``. "
"The javascript loads data into ``$('#div_id')``.")],
}
DEPRECATED_SETTINGS_MAPPINGS = {
# Deprecated settings, description should indicate the replacement.
"omero.web.force_script_name":
["FORCE_SCRIPT_NAME",
None,
leave_none_unset,
("Use omero.web.prefix instead.")],
"omero.web.server_email":
["SERVER_EMAIL",
None,
identity,
("Use omero.mail.from instead.")],
"omero.web.email_host":
["EMAIL_HOST",
None,
identity,
("Use omero.mail.host instead.")],
"omero.web.email_host_password":
["EMAIL_HOST_PASSWORD",
None,
identity,
("Use omero.mail.password instead.")],
"omero.web.email_host_user":
["EMAIL_HOST_USER",
None,
identity,
("Use omero.mail.username instead.")],
"omero.web.email_port":
["EMAIL_PORT",
None,
identity,
("Use omero.mail.port instead.")],
"omero.web.email_subject_prefix":
["EMAIL_SUBJECT_PREFIX",
"[OMERO.web]",
str,
("Default email subject is no longer configurable.")],
"omero.web.email_use_tls":
["EMAIL_USE_TLS",
"false",
parse_boolean,
("Use omero.mail.smtp.* instead to set up"
" javax.mail.Session properties.")],
"omero.web.plate_download.enabled":
["PLATE_DOWNLOAD_ENABLED",
"false",
parse_boolean,
("Use omero.policy.binary_access instead to restrict download.")],
"omero.web.viewer.initial_zoom_level":
["VIEWER_INITIAL_ZOOM_LEVEL",
None,
leave_none_unset_int,
("Use omero.client.viewer.initial_zoom_level instead.")],
"omero.web.send_broken_link_emails":
["SEND_BROKEN_LINK_EMAILS",
"false",
parse_boolean,
("Replaced by django.middleware.common.BrokenLinkEmailsMiddleware."
"To get notification set :property:`omero.web.admins` property.")
],
}
del CUSTOM_HOST
# DEVELOPMENT_SETTINGS_MAPPINGS - WARNING: For each setting developer MUST open
# a ticket that needs to be resolved before a release either by moving the
# setting to CUSTOM_SETTINGS_MAPPINGS or by removing the setting at all.
DEVELOPMENT_SETTINGS_MAPPINGS = {}
def map_deprecated_settings(settings):
m = {}
for key, values in settings.items():
try:
global_name = values[0]
m[global_name] = (CUSTOM_SETTINGS[key], key)
if len(values) < 5:
# Not using default (see process_custom_settings)
values.append(False)
except KeyError:
if len(values) < 5:
values.append(True)
return m
def process_custom_settings(
module, settings='CUSTOM_SETTINGS_MAPPINGS', deprecated=None):
logging.info('Processing custom settings for module %s' % module.__name__)
if deprecated:
deprecated_map = map_deprecated_settings(
getattr(module, deprecated, {}))
else:
deprecated_map = {}
for key, values in getattr(module, settings, {}).items():
# Django may import settings.py more than once, see:
# http://blog.dscpl.com.au/2010/03/improved-wsgi-script-for-use-with.html
# In that case, the custom settings have already been processed.
if len(values) == 5:
continue
global_name, default_value, mapping, description = values
try:
global_value = CUSTOM_SETTINGS[key]
values.append(False)
except KeyError:
global_value = default_value
values.append(True)
try:
using_default = values[-1]
if global_name in deprecated_map:
dep_value, dep_key = deprecated_map[global_name]
if using_default:
logging.warning(
'Setting %s is deprecated, use %s', dep_key, key)
global_value = dep_value
else:
logging.error(
'%s and its deprecated key %s are both set, using %s',
key, dep_key, key)
setattr(module, global_name, mapping(global_value))
except ValueError:
raise ValueError(
"Invalid %s JSON: %r" % (global_name, global_value))
except LeaveUnset:
pass
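# A hedged illustration of how one mapping entry is processed (the key and
# global name are real, the runtime values are invented): for the entry
#   "omero.web.debug": ["DEBUG", "false", parse_boolean, "..."]
# process_custom_settings() looks up "omero.web.debug" in CUSTOM_SETTINGS
# (loaded from etc/grid/config.xml); if present, its string value is run
# through parse_boolean and bound as the module-level DEBUG, otherwise the
# default "false" is parsed instead and the entry is flagged as using the
# default by appending True to the values list.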
process_custom_settings(sys.modules[__name__], 'INTERNAL_SETTINGS_MAPPING')
process_custom_settings(sys.modules[__name__], 'CUSTOM_SETTINGS_MAPPINGS',
'DEPRECATED_SETTINGS_MAPPINGS')
process_custom_settings(sys.modules[__name__], 'DEVELOPMENT_SETTINGS_MAPPINGS')
if not DEBUG: # from CUSTOM_SETTINGS_MAPPINGS # noqa
LOGGING['loggers']['django.request']['level'] = 'INFO'
LOGGING['loggers']['django']['level'] = 'INFO'
LOGGING['loggers']['']['level'] = 'INFO'
# TEMPLATE_DEBUG: A boolean that turns on/off template debug mode. If this is
# True, the fancy error page will display a detailed report for any
# TemplateSyntaxError. This report contains
# the relevant snippet of the template, with the appropriate line highlighted.
# Note that Django only displays fancy error pages if DEBUG is True,
# alternatively error is handled by:
# handler404 = "omeroweb.feedback.views.handler404"
# handler500 = "omeroweb.feedback.views.handler500"
TEMPLATE_DEBUG = DEBUG # from CUSTOM_SETTINGS_MAPPINGS # noqa
def report_settings(module):
from django.views.debug import cleanse_setting
custom_settings_mappings = getattr(module, 'CUSTOM_SETTINGS_MAPPINGS', {})
for key in sorted(custom_settings_mappings):
values = custom_settings_mappings[key]
global_name, default_value, mapping, description, using_default = \
values
source = using_default and "default" or key
global_value = getattr(module, global_name, None)
if global_name.isupper():
logger.debug(
"%s = %r (source:%s)", global_name,
cleanse_setting(global_name, global_value), source)
deprecated_settings = getattr(module, 'DEPRECATED_SETTINGS_MAPPINGS', {})
for key in sorted(deprecated_settings):
values = deprecated_settings[key]
global_name, default_value, mapping, description, using_default = \
values
global_value = getattr(module, global_name, None)
if global_name.isupper() and not using_default:
logger.debug(
"%s = %r (deprecated:%s, %s)", global_name,
cleanse_setting(global_name, global_value), key, description)
report_settings(sys.modules[__name__])
SITE_ID = 1
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
FIRST_DAY_OF_WEEK = 0 # 0-Monday, ... 6-Sunday
# LANGUAGE_CODE: A string representing the language code for this
# installation. This should be in standard language format. For example, U.S.
# English is "en-us".
LANGUAGE_CODE = 'en-gb'
# SECRET_KEY: A secret key for this particular Django installation. Used to
# provide a seed in secret-key hashing algorithms. Set this to a random string
# -- the longer, the better. django-admin.py startproject creates one
# automatically.
# Make this unique, and don't share it with anybody.
SECRET_KEY = '@@k%g#7=%4b6ib7yr1tloma&g0s2nni6ljf!m0h&x9c712c7yj'
# USE_I18N: A boolean that specifies whether Django's internationalization
# system should be enabled.
# This provides an easy way to turn it off, for performance. If this is set to
# False, Django will make some optimizations so as not to load the
# internationalization machinery.
USE_I18N = True
# MIDDLEWARE_CLASSES: A tuple of middleware classes to use.
# See https://docs.djangoproject.com/en/1.6/topics/http/middleware/.
MIDDLEWARE_CLASSES = (
'django.middleware.common.BrokenLinkEmailsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
# ROOT_URLCONF: A string representing the full Python import path to your root
# URLconf.
# For example: "mydjangoapps.urls". Can be overridden on a per-request basis
# by setting the attribute urlconf on the incoming HttpRequest object.
ROOT_URLCONF = 'omeroweb.urls'
# STATICFILES_FINDERS: The list of finder backends that know how to find
# static files in various locations. The default will find files stored in the
# STATICFILES_DIRS setting (using
# django.contrib.staticfiles.finders.FileSystemFinder) and in a static
# subdirectory of each app (using
# django.contrib.staticfiles.finders.AppDirectoriesFinder)
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
# STATIC_URL: URL to use when referring to static files located in
# STATIC_ROOT.
# Example: "/site_media/static/" or "http://static.example.com/".
# If not None, this will be used as the base path for media definitions and
# the staticfiles app. It must end in a slash if set to a non-empty value.
# This var is configurable by omero.web.static_url (e.g. STATIC_URL = '/static/')
# STATIC_ROOT: The absolute path to the directory where collectstatic will
# collect static files for deployment. If the staticfiles contrib app is
# enabled (default) the collectstatic management command will collect static
# files into this directory.
STATIC_ROOT = os.path.join(os.path.dirname(__file__),
'static').replace('\\', '/')
# STATICFILES_DIRS: This setting defines the additional locations the
# staticfiles app will traverse if the FileSystemFinder finder is enabled,
# e.g. if you use the collectstatic or findstatic management command or use
# the static file serving view.
if WEBSTART:
# from CUSTOM_SETTINGS_MAPPINGS
STATICFILES_DIRS += (("webstart/jars", INSIGHT_JARS),) # noqa
# TEMPLATE_CONTEXT_PROCESSORS: A tuple of callables that are used to populate
# the context in RequestContext. These callables take a request object as
# their argument and return a dictionary of items to be merged into the
# context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages",
"omeroweb.custom_context_processor.url_suffix"
)
# TEMPLATE_LOADERS: A tuple of template loader classes, specified as strings.
# Each Loader class knows how to import templates from a particular source.
# Optionally, a tuple can be used instead of a string. The first item in the
# tuple should be the Loader's module, subsequent items are passed to the
# Loader during initialization.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# INSTALLED_APPS: A tuple of strings designating all applications that are
# enabled in this Django installation. Each string should be a full Python
# path to a Python package that contains a Django application, as created by
# django-admin.py startapp.
INSTALLED_APPS = (
'django.contrib.staticfiles',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'omeroweb.feedback',
'omeroweb.webadmin',
'omeroweb.webclient',
'omeroweb.webgateway',
'omeroweb.webredirect',
'omeroweb.webstart',
'pipeline',
)
# ADDITIONAL_APPS: We import any settings.py from apps. This allows them to
# modify settings.
# We're also processing any CUSTOM_SETTINGS_MAPPINGS defined there.
for app in ADDITIONAL_APPS: # from CUSTOM_SETTINGS_MAPPINGS # noqa
# Previously the app was added to INSTALLED_APPS as 'omeroweb.app', which
# then required the app to reside within or be symlinked from within
# omeroweb, instead of just having to be somewhere on the python path.
# To allow apps to just be on the path, but keep it backwards compatible,
# try to import as omeroweb.app, if it works, keep that in INSTALLED_APPS,
# otherwise add it to INSTALLED_APPS just with its own name.
try:
__import__('omeroweb.%s' % app)
INSTALLED_APPS += ('omeroweb.%s' % app,)
except ImportError:
INSTALLED_APPS += (app,)
try:
logger.debug(
'Attempting to import additional app settings for app: %s' % app)
module = __import__('%s.settings' % app)
process_custom_settings(module.settings)
report_settings(module.settings)
except ImportError:
logger.debug("Couldn't import settings from app: %s" % app)
logger.debug('INSTALLED_APPS=%s' % [INSTALLED_APPS])
PIPELINE_CSS = {
'webgateway_viewer': {
'source_filenames': (
'webgateway/css/reset.css',
'webgateway/css/ome.body.css',
'webclient/css/dusty.css',
'webgateway/css/ome.viewport.css',
'webgateway/css/ome.toolbar.css',
'webgateway/css/ome.gs_slider.css',
'webgateway/css/base.css',
'webgateway/css/ome.snippet_header_logo.css',
'webgateway/css/ome.postit.css',
'webgateway/css/ome.rangewidget.css',
'3rdparty/farbtastic-1.2/farbtastic.css',
'webgateway/css/ome.colorbtn.css',
'3rdparty/JQuerySpinBtn-1.3a/JQuerySpinBtn.css',
'3rdparty/jquery-ui-1.10.4/themes/base/jquery-ui.all.css',
'webgateway/css/omero_image.css',
'3rdparty/panojs-2.0.0/panojs.css',
),
'output_filename': 'omeroweb.viewer.min.css',
},
}
PIPELINE_JS = {
'webgateway_viewer': {
'source_filenames': (
'3rdparty/jquery-1.11.1.js',
'3rdparty/jquery-migrate-1.2.1.js',
'3rdparty/jquery-ui-1.10.4/js/jquery-ui.1.10.4.js',
'webgateway/js/ome.popup.js',
'3rdparty/aop-1.3.js',
'3rdparty/raphael-2.1.0/raphael.js',
'3rdparty/raphael-2.1.0/scale.raphael.js',
'3rdparty/panojs-2.0.0/utils.js',
'3rdparty/panojs-2.0.0/PanoJS.js',
'3rdparty/panojs-2.0.0/controls.js',
'3rdparty/panojs-2.0.0/pyramid_Bisque.js',
'3rdparty/panojs-2.0.0/pyramid_imgcnv.js',
'3rdparty/panojs-2.0.0/pyramid_Zoomify.js',
'3rdparty/panojs-2.0.0/control_thumbnail.js',
'3rdparty/panojs-2.0.0/control_info.js',
'3rdparty/panojs-2.0.0/control_svg.js',
'3rdparty/panojs-2.0.0/control_roi.js',
'3rdparty/panojs-2.0.0/control_scalebar.js',
'3rdparty/hammer-2.0.2/hammer.min.js',
'webgateway/js/ome.gs_utils.js',
'webgateway/js/ome.viewportImage.js',
'webgateway/js/ome.gs_slider.js',
'webgateway/js/ome.viewport.js',
'webgateway/js/omero_image.js',
'webgateway/js/ome.roidisplay.js',
'webgateway/js/ome.scalebardisplay.js',
'webgateway/js/ome.smartdialog.js',
'3rdparty/JQuerySpinBtn-1.3a/JQuerySpinBtn.js',
'webgateway/js/ome.colorbtn.js',
'webgateway/js/ome.postit.js',
'3rdparty/jquery.selectboxes-2.2.6.js',
'webgateway/js/ome.rangewidget.js',
'3rdparty/farbtastic-1.2/farbtastic.js',
'3rdparty/jquery.mousewheel-3.0.6.js',
),
'output_filename': 'omeroweb.viewer.min.js',
}
}
CSRF_FAILURE_VIEW = "omeroweb.feedback.views.csrf_failure"
# FEEDBACK - DO NOT MODIFY!
# FEEDBACK_URL: Is now configurable for testing purposes only. Used in
# feedback.sendfeedback.SendFeedback class in order to submit errors or
# comment messages to http://qa.openmicroscopy.org.uk.
# FEEDBACK_APP: 6 = OMERO.web
FEEDBACK_APP = 6
# IGNORABLE_404_STARTS:
# Default: ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
# IGNORABLE_404_ENDS:
# Default: ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi',
# 'favicon.ico', '.php')
# SESSION_FILE_PATH: If you're using file-based session storage, this sets the
# directory in which Django will store session data. When the default value
# (None) is used, Django will use the standard temporary directory for the
# system.
SESSION_FILE_PATH = tempfile.gettempdir()
# FILE_UPLOAD_TEMP_DIR: The directory to store data temporarily while
# uploading files.
FILE_UPLOAD_TEMP_DIR = tempfile.gettempdir()
# FILE_UPLOAD_MAX_MEMORY_SIZE: The maximum size (in bytes) that an upload
# will be before it gets streamed to the file system.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # default 2621440 (i.e. 2.5 MB).
# DEFAULT_IMG: Used in
# webclient.webclient_gateway.OmeroWebGateway.defaultThumbnail in order to
# load default image while thumbnail can't be retrieved from the server.
DEFAULT_IMG = os.path.join(
os.path.dirname(__file__), 'webgateway', 'static', 'webgateway', 'img',
'image128.png').replace('\\', '/')
# DEFAULT_USER: Used in
# webclient.webclient_gateway.OmeroWebGateway.getExperimenterDefaultPhoto in
# order to load default avatar while experimenter photo can't be retrieved
# from the server.
DEFAULT_USER = os.path.join(
os.path.dirname(__file__), 'webgateway', 'static', 'webgateway', 'img',
'personal32.png').replace('\\', '/')
# MANAGERS: A tuple in the same format as ADMINS that specifies who should get
# broken-link notifications when
# SEND_BROKEN_LINK_EMAILS=True.
MANAGERS = ADMINS # from CUSTOM_SETTINGS_MAPPINGS # noqa
# https://docs.djangoproject.com/en/1.6/releases/1.6/#default-session-serialization-switched-to-json
# JSON serializer, which is now the default, cannot handle
# omeroweb.connector.Connector object
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# Load server list and freeze
from connector import Server
def load_server_list():
for s in SERVER_LIST: # from CUSTOM_SETTINGS_MAPPINGS # noqa
server = (len(s) > 2) and unicode(s[2]) or None
Server(host=unicode(s[0]), port=int(s[1]), server=server)
Server.freeze()
load_server_list()
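# Sketch of what load_server_list() does (assuming omero.web.server_list was
# left at its default): an entry such as
#   ["localhost", 4064, "omero"]
# becomes Server(host=u'localhost', port=4064, server=u'omero'), and
# Server.freeze() then prevents further registrations at runtime.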
| gpl-2.0 |
maxalbert/tohu | tohu/v7/derived_generators/fstr.py | 1 | 1341 | import inspect
import re
from .apply import Apply
class fstr(Apply):
"""
Helper function for easy formatting of tohu generators.
Usage example:
>>> g1 = Integer(100, 200)
>>> g2 = Integer(300, 400)
>>> g3 = g1 + g2
>>> h = fstr('{g1} + {g2} = {g3}')
>>> print(next(h))
122 + 338 = 460
>>> print(next(h))
165 + 325 = 490
"""
def __init__(self, spec):
# FIXME: this pattern is not yet compatible with the full f-string spec.
# For example, it doesn't recognise double '{{' and '}}' (for escaping).
# Also it would be awesome if we could parse arbitrary expressions inside
# the curly braces.
# TODO: re-implement this using the `string.Formatter` class from the standard library.
pattern = "{([^}:]+)(:.*)?}"
gen_names = [gen_name for (gen_name, _) in re.findall(pattern, spec)]
# TODO: do we ever need to store and pass in the original namespace when spawning generators?
namespace = inspect.currentframe().f_back.f_globals
namespace.update(inspect.currentframe().f_back.f_locals)
gens = {name: namespace[name] for name in gen_names}
def format_items(**kwargs):
return spec.format(**kwargs)
super().__init__(format_items, **gens)
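# Illustration of the gen_names extraction above (verifiable with the regex
# as written; the generator names are hypothetical):
#
# >>> re.findall("{([^}:]+)(:.*)?}", '{g1} + {g2} = {g3}')
# [('g1', ''), ('g2', ''), ('g3', '')]
#
# Each first tuple element is looked up in the caller's namespace and passed
# to Apply as a keyword argument.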
| mit |
liangazhou/django-rdp | packages/eclipse/plugins/org.python.pydev.jython_4.4.0.201510052309/Lib/encodings/mac_romanian.py | 593 | 13917 | """ Python Character Mapping Codec mac_romanian generated from 'MAPPINGS/VENDORS/APPLE/ROMANIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-romanian',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
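# Usage sketch (illustrative, not part of the generated file): once the
# standard `encodings` package has registered this codec, the tables below
# drive both directions, e.g. (Python 2):
#   u'\u0218'.encode('mac-romanian')  ->  '\xaf'    # S WITH COMMA BELOW
#   '\xaf'.decode('mac-romanian')     ->  u'\u0218'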
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0102' # 0xAE -> LATIN CAPITAL LETTER A WITH BREVE
u'\u0218' # 0xAF -> LATIN CAPITAL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u0103' # 0xBE -> LATIN SMALL LETTER A WITH BREVE
u'\u0219' # 0xBF -> LATIN SMALL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
u'\u021b' # 0xDF -> LATIN SMALL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
u'\u2021' # 0xE0 -> DOUBLE DAGGER
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
jrwdunham/old | onlinelinguisticdatabase/model/form.py | 1 | 8233 | # Copyright 2016 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Form model"""
from sqlalchemy import Column, Sequence, ForeignKey
from sqlalchemy.types import Integer, Unicode, UnicodeText, Date, DateTime
from sqlalchemy.orm import relation
from onlinelinguisticdatabase.model.meta import Base, now
class FormFile(Base):
__tablename__ = 'formfile'
id = Column(Integer, Sequence('formfile_seq_id', optional=True), primary_key=True)
form_id = Column(Integer, ForeignKey('form.id'))
file_id = Column(Integer, ForeignKey('file.id'))
datetime_modified = Column(DateTime, default=now)
class FormTag(Base):
__tablename__ = 'formtag'
id = Column(Integer, Sequence('formtag_seq_id', optional=True), primary_key=True)
form_id = Column(Integer, ForeignKey('form.id'))
tag_id = Column(Integer, ForeignKey('tag.id'))
datetime_modified = Column(DateTime(), default=now)
class CollectionForm(Base):
__tablename__ = 'collectionform'
id = Column(Integer, Sequence('collectionform_seq_id', optional=True), primary_key=True)
collection_id = Column(Integer, ForeignKey('collection.id'))
form_id = Column(Integer, ForeignKey('form.id'))
datetime_modified = Column(DateTime(), default=now)
class Form(Base):
__tablename__ = "form"
def __repr__(self):
return "<Form (%s)>" % self.id
id = Column(Integer, Sequence('form_seq_id', optional=True), primary_key=True)
UUID = Column(Unicode(36))
transcription = Column(Unicode(510), nullable=False)
phonetic_transcription = Column(Unicode(510))
narrow_phonetic_transcription = Column(Unicode(510))
morpheme_break = Column(Unicode(510))
morpheme_gloss = Column(Unicode(510))
comments = Column(UnicodeText)
speaker_comments = Column(UnicodeText)
grammaticality = Column(Unicode(255))
date_elicited = Column(Date)
datetime_entered = Column(DateTime)
datetime_modified = Column(DateTime, default=now)
syntactic_category_string = Column(Unicode(510))
morpheme_break_ids = Column(UnicodeText)
morpheme_gloss_ids = Column(UnicodeText)
break_gloss_category = Column(Unicode(1023))
syntax = Column(Unicode(1023))
semantics = Column(Unicode(1023))
status = Column(Unicode(40), default=u'tested') # u'tested' vs. u'requires testing'
elicitor_id = Column(Integer, ForeignKey('user.id', ondelete='SET NULL'))
elicitor = relation('User', primaryjoin='Form.elicitor_id==User.id')
enterer_id = Column(Integer, ForeignKey('user.id', ondelete='SET NULL'))
enterer = relation('User', primaryjoin='Form.enterer_id==User.id')
modifier_id = Column(Integer, ForeignKey('user.id', ondelete='SET NULL'))
modifier = relation('User', primaryjoin='Form.modifier_id==User.id')
verifier_id = Column(Integer, ForeignKey('user.id', ondelete='SET NULL'))
verifier = relation('User', primaryjoin='Form.verifier_id==User.id')
speaker_id = Column(Integer, ForeignKey('speaker.id', ondelete='SET NULL'))
speaker = relation('Speaker')
elicitationmethod_id = Column(Integer, ForeignKey('elicitationmethod.id', ondelete='SET NULL'))
elicitation_method = relation('ElicitationMethod')
syntacticcategory_id = Column(Integer, ForeignKey('syntacticcategory.id', ondelete='SET NULL'))
syntactic_category = relation('SyntacticCategory', backref='forms')
source_id = Column(Integer, ForeignKey('source.id', ondelete='SET NULL'))
source = relation('Source')
translations = relation('Translation', backref='form', cascade='all, delete, delete-orphan')
files = relation('File', secondary=FormFile.__table__, backref='forms')
collections = relation('Collection', secondary=CollectionForm.__table__, backref='forms')
tags = relation('Tag', secondary=FormTag.__table__, backref='forms')
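    # Illustrative note: appending to these relations writes through the
    # association tables defined above, so e.g. `form.tags.append(tag)`
    # inserts a FormTag row and `tag.forms` then sees the link.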
def get_dict(self):
"""Return a Python dictionary representation of the Form. This
facilitates JSON-stringification, cf. utils.JSONOLDEncoder. Relational
data are truncated, e.g., form_dict['elicitor'] is a dict with keys for
'id', 'first_name' and 'last_name' (cf. get_mini_user_dict above) and lacks
keys for other attributes such as 'username', 'personal_page_content', etc.
"""
return {
'id': self.id,
'UUID': self.UUID,
'transcription': self.transcription,
'phonetic_transcription': self.phonetic_transcription,
'narrow_phonetic_transcription': self.narrow_phonetic_transcription,
'morpheme_break': self.morpheme_break,
'morpheme_gloss': self.morpheme_gloss,
'comments': self.comments,
'speaker_comments': self.speaker_comments,
'grammaticality': self.grammaticality,
'date_elicited': self.date_elicited,
'datetime_entered': self.datetime_entered,
'datetime_modified': self.datetime_modified,
'syntactic_category_string': self.syntactic_category_string,
'morpheme_break_ids': self.json_loads(self.morpheme_break_ids),
'morpheme_gloss_ids': self.json_loads(self.morpheme_gloss_ids),
'break_gloss_category': self.break_gloss_category,
'syntax': self.syntax,
'semantics': self.semantics,
'status': self.status,
'elicitor': self.get_mini_user_dict(self.elicitor),
'enterer': self.get_mini_user_dict(self.enterer),
'modifier': self.get_mini_user_dict(self.modifier),
'verifier': self.get_mini_user_dict(self.verifier),
'speaker': self.get_mini_speaker_dict(self.speaker),
'elicitation_method': self.get_mini_elicitation_method_dict(self.elicitation_method),
'syntactic_category': self.get_mini_syntactic_category_dict(self.syntactic_category),
'source': self.get_mini_source_dict(self.source),
'translations': self.get_translations_list(self.translations),
'tags': self.get_tags_list(self.tags),
'files': self.get_files_list(self.files)
}
def extract_word_pos_sequences(self, unknown_category, morpheme_splitter,
extract_morphemes=False):
"""Return the unique word-based pos sequences, as well as (possibly) the morphemes, implicit in the form.
:param str unknown_category: the string used in syntactic category strings when a morpheme-gloss pair is unknown
        :param morpheme_splitter: callable that splits a string into its morphemes and delimiters
:param bool extract_morphemes: determines whether we return a list of morphemes implicit in the form.
:returns: 2-tuple: (set of pos/delimiter sequences, list of morphemes as (pos, (mb, mg)) tuples).
"""
if not self.syntactic_category_string:
return None, None
pos_sequences = set()
morphemes = []
sc_words = self.syntactic_category_string.split()
mb_words = self.morpheme_break.split()
mg_words = self.morpheme_gloss.split()
for sc_word, mb_word, mg_word in zip(sc_words, mb_words, mg_words):
pos_sequence = tuple(morpheme_splitter(sc_word))
if unknown_category not in pos_sequence:
pos_sequences.add(pos_sequence)
if extract_morphemes:
morpheme_sequence = morpheme_splitter(mb_word)[::2]
gloss_sequence = morpheme_splitter(mg_word)[::2]
for pos, morpheme, gloss in zip(pos_sequence[::2], morpheme_sequence, gloss_sequence):
morphemes.append((pos, (morpheme, gloss)))
return pos_sequences, morphemes
| apache-2.0 |
kirca/OpenUpgrade | addons/mail/mail_mail.py | 183 | 18372 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import logging
from email.utils import formataddr
from urlparse import urljoin
from openerp import api, tools
from openerp import SUPERUSER_ID
from openerp.addons.base.ir.ir_mail_server import MailDeliveryException
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class mail_mail(osv.Model):
""" Model holding RFC2822 email messages to send. This model also provides
facilities to queue and send new email messages. """
_name = 'mail.mail'
_description = 'Outgoing Mails'
_inherits = {'mail.message': 'mail_message_id'}
_order = 'id desc'
_rec_name = 'subject'
_columns = {
'mail_message_id': fields.many2one('mail.message', 'Message', required=True, ondelete='cascade', auto_join=True),
'state': fields.selection([
('outgoing', 'Outgoing'),
('sent', 'Sent'),
('received', 'Received'),
('exception', 'Delivery Failed'),
('cancel', 'Cancelled'),
], 'Status', readonly=True, copy=False),
'auto_delete': fields.boolean('Auto Delete',
help="Permanently delete this email after sending it, to save space"),
'references': fields.text('References', help='Message references, such as identifiers of previous messages', readonly=1),
'email_to': fields.text('To', help='Message recipients (emails)'),
'recipient_ids': fields.many2many('res.partner', string='To (Partners)'),
'email_cc': fields.char('Cc', help='Carbon copy message recipients'),
'body_html': fields.text('Rich-text Contents', help="Rich-text/HTML message"),
'headers': fields.text('Headers', copy=False),
# Auto-detected based on create() - if 'mail_message_id' was passed then this mail is a notification
# and during unlink() we will not cascade delete the parent and its attachments
'notification': fields.boolean('Is Notification',
help='Mail has been created to notify people of an existing mail.message'),
}
_defaults = {
'state': 'outgoing',
}
def default_get(self, cr, uid, fields, context=None):
# protection for `default_type` values leaking from menu action context (e.g. for invoices)
# To remove when automatic context propagation is removed in web client
if context and context.get('default_type') and context.get('default_type') not in self._all_columns['type'].column.selection:
context = dict(context, default_type=None)
return super(mail_mail, self).default_get(cr, uid, fields, context=context)
def create(self, cr, uid, values, context=None):
# notification field: if not set, set if mail comes from an existing mail.message
if 'notification' not in values and values.get('mail_message_id'):
values['notification'] = True
return super(mail_mail, self).create(cr, uid, values, context=context)
def unlink(self, cr, uid, ids, context=None):
# cascade-delete the parent message for all mails that are not created for a notification
ids_to_cascade = self.search(cr, uid, [('notification', '=', False), ('id', 'in', ids)])
parent_msg_ids = [m.mail_message_id.id for m in self.browse(cr, uid, ids_to_cascade, context=context)]
res = super(mail_mail, self).unlink(cr, uid, ids, context=context)
self.pool.get('mail.message').unlink(cr, uid, parent_msg_ids, context=context)
return res
def mark_outgoing(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'outgoing'}, context=context)
def cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
@api.cr_uid
def process_email_queue(self, cr, uid, ids=None, context=None):
"""Send immediately queued messages, committing after each
message is sent - this is not transactional and should
not be called during another transaction!
:param list ids: optional list of emails ids to send. If passed
no search is performed, and these ids are used
instead.
:param dict context: if a 'filters' key is present in context,
this value will be used as an additional
filter to further restrict the outgoing
messages to send (by default all 'outgoing'
messages are sent).
"""
if context is None:
context = {}
if not ids:
filters = [('state', '=', 'outgoing')]
if 'filters' in context:
filters.extend(context['filters'])
ids = self.search(cr, uid, filters, context=context)
res = None
try:
# Force auto-commit - this is meant to be called by
# the scheduler, and we can't allow rolling back the status
# of previously sent emails!
res = self.send(cr, uid, ids, auto_commit=True, context=context)
except Exception:
_logger.exception("Failed processing mail queue")
return res
def _postprocess_sent_message(self, cr, uid, mail, context=None, mail_sent=True):
"""Perform any post-processing necessary after sending ``mail``
successfully, including deleting it completely along with its
attachment if the ``auto_delete`` flag of the mail was set.
Overridden by subclasses for extra post-processing behaviors.
:param browse_record mail: the mail that was just sent
:return: True
"""
if mail_sent and mail.auto_delete:
# done with SUPERUSER_ID to avoid giving large unlink access rights
self.unlink(cr, SUPERUSER_ID, [mail.id], context=context)
return True
#------------------------------------------------------
# mail_mail formatting, tools and send mechanism
#------------------------------------------------------
def _get_partner_access_link(self, cr, uid, mail, partner=None, context=None):
"""Generate URLs for links in mails: partner has access (is user):
link to action_mail_redirect action that will redirect to doc or Inbox """
if context is None:
context = {}
if partner and partner.user_ids:
base_url = self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'web.base.url')
mail_model = mail.model or 'mail.thread'
url = urljoin(base_url, self.pool[mail_model]._get_access_link(cr, uid, mail, partner, context=context))
return "<span class='oe_mail_footer_access'><small>%(access_msg)s <a style='color:inherit' href='%(portal_link)s'>%(portal_msg)s</a></small></span>" % {
'access_msg': _('about') if mail.record_name else _('access'),
'portal_link': url,
'portal_msg': '%s %s' % (context.get('model_name', ''), mail.record_name) if mail.record_name else _('your messages'),
}
else:
return None
def send_get_mail_subject(self, cr, uid, mail, force=False, partner=None, context=None):
"""If subject is void, set the subject as 'Re: <Resource>' or
'Re: <mail.parent_id.subject>'
:param boolean force: force the subject replacement
"""
if (force or not mail.subject) and mail.record_name:
return 'Re: %s' % (mail.record_name)
elif (force or not mail.subject) and mail.parent_id and mail.parent_id.subject:
return 'Re: %s' % (mail.parent_id.subject)
return mail.subject
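    # Illustrative sketch: for a mail whose subject is unset and whose
    # record_name is 'SO0042', this returns 'Re: SO0042'; if a subject is
    # already set and force is False, the existing subject is returned as-is.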
def send_get_mail_body(self, cr, uid, mail, partner=None, context=None):
"""Return a specific ir_email body. The main purpose of this method
is to be inherited to add custom content depending on some module."""
body = mail.body_html
# generate access links for notifications or emails linked to a specific document with auto threading
link = None
if mail.notification or (mail.model and mail.res_id and not mail.no_auto_thread):
link = self._get_partner_access_link(cr, uid, mail, partner, context=context)
if link:
body = tools.append_content_to_html(body, link, plaintext=False, container_tag='div')
return body
def send_get_mail_to(self, cr, uid, mail, partner=None, context=None):
"""Forge the email_to with the following heuristic:
- if 'partner', recipient specific (Partner Name <email>)
- else fallback on mail.email_to splitting """
if partner:
email_to = [formataddr((partner.name, partner.email))]
else:
email_to = tools.email_split(mail.email_to)
return email_to
def send_get_email_dict(self, cr, uid, mail, partner=None, context=None):
"""Return a dictionary for specific email values, depending on a
partner, or generic to the whole recipients given by mail.email_to.
:param browse_record mail: mail.mail browse_record
:param browse_record partner: specific recipient partner
"""
body = self.send_get_mail_body(cr, uid, mail, partner=partner, context=context)
body_alternative = tools.html2plaintext(body)
res = {
'body': body,
'body_alternative': body_alternative,
'subject': self.send_get_mail_subject(cr, uid, mail, partner=partner, context=context),
'email_to': self.send_get_mail_to(cr, uid, mail, partner=partner, context=context),
}
return res
def send(self, cr, uid, ids, auto_commit=False, raise_exception=False, context=None):
""" Sends the selected emails immediately, ignoring their current
state (mails that have already been sent should not be passed
unless they should actually be re-sent).
Emails successfully delivered are marked as 'sent', and those
            that fail to be delivered are marked as 'exception', and the
corresponding error mail is output in the server logs.
:param bool auto_commit: whether to force a commit of the mail status
after sending each mail (meant only for scheduler processing);
should never be True during normal transactions (default: False)
:param bool raise_exception: whether to raise an exception if the
email sending process has failed
:return: True
"""
context = dict(context or {})
ir_mail_server = self.pool.get('ir.mail_server')
ir_attachment = self.pool['ir.attachment']
for mail in self.browse(cr, SUPERUSER_ID, ids, context=context):
try:
# TDE note: remove me when model_id field is present on mail.message - done here to avoid doing it multiple times in the sub method
if mail.model:
model_id = self.pool['ir.model'].search(cr, SUPERUSER_ID, [('model', '=', mail.model)], context=context)[0]
model = self.pool['ir.model'].browse(cr, SUPERUSER_ID, model_id, context=context)
else:
model = None
if model:
context['model_name'] = model.name
# load attachment binary data with a separate read(), as prefetching all
                # `datas` (binary field) could bloat the browse cache, triggering
# soft/hard mem limits with temporary data.
attachment_ids = [a.id for a in mail.attachment_ids]
attachments = [(a['datas_fname'], base64.b64decode(a['datas']))
for a in ir_attachment.read(cr, SUPERUSER_ID, attachment_ids,
['datas_fname', 'datas'])]
# specific behavior to customize the send email for notified partners
email_list = []
if mail.email_to:
email_list.append(self.send_get_email_dict(cr, uid, mail, context=context))
for partner in mail.recipient_ids:
email_list.append(self.send_get_email_dict(cr, uid, mail, partner=partner, context=context))
# headers
headers = {}
bounce_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.bounce.alias", context=context)
catchall_domain = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.catchall.domain", context=context)
if bounce_alias and catchall_domain:
if mail.model and mail.res_id:
headers['Return-Path'] = '%s-%d-%s-%d@%s' % (bounce_alias, mail.id, mail.model, mail.res_id, catchall_domain)
else:
headers['Return-Path'] = '%s-%d@%s' % (bounce_alias, mail.id, catchall_domain)
if mail.headers:
try:
headers.update(eval(mail.headers))
except Exception:
pass
# Writing on the mail object may fail (e.g. lock on user) which
# would trigger a rollback *after* actually sending the email.
                # To avoid sending the same email twice, provoke the failure earlier
mail.write({'state': 'exception'})
mail_sent = False
# build an RFC2822 email.message.Message object and send it without queuing
res = None
for email in email_list:
msg = ir_mail_server.build_email(
email_from=mail.email_from,
email_to=email.get('email_to'),
subject=email.get('subject'),
body=email.get('body'),
body_alternative=email.get('body_alternative'),
email_cc=tools.email_split(mail.email_cc),
reply_to=mail.reply_to,
attachments=attachments,
message_id=mail.message_id,
references=mail.references,
object_id=mail.res_id and ('%s-%s' % (mail.res_id, mail.model)),
subtype='html',
subtype_alternative='plain',
headers=headers)
try:
res = ir_mail_server.send_email(cr, uid, msg,
mail_server_id=mail.mail_server_id.id,
context=context)
except AssertionError as error:
if error.message == ir_mail_server.NO_VALID_RECIPIENT:
# No valid recipient found for this particular
# mail item -> ignore error to avoid blocking
# delivery to next recipients, if any. If this is
# the only recipient, the mail will show as failed.
_logger.warning("Ignoring invalid recipients for mail.mail %s: %s",
mail.message_id, email.get('email_to'))
else:
raise
if res:
mail.write({'state': 'sent', 'message_id': res})
mail_sent = True
# /!\ can't use mail.state here, as mail.refresh() will cause an error
# see revid:[email protected] in 6.1
if mail_sent:
_logger.info('Mail with ID %r and Message-Id %r successfully sent', mail.id, mail.message_id)
self._postprocess_sent_message(cr, uid, mail, context=context, mail_sent=mail_sent)
except MemoryError:
# prevent catching transient MemoryErrors, bubble up to notify user or abort cron job
# instead of marking the mail as failed
_logger.exception('MemoryError while processing mail with ID %r and Msg-Id %r. '\
'Consider raising the --limit-memory-hard startup option',
mail.id, mail.message_id)
raise
except Exception as e:
_logger.exception('failed sending mail.mail %s', mail.id)
mail.write({'state': 'exception'})
self._postprocess_sent_message(cr, uid, mail, context=context, mail_sent=False)
if raise_exception:
if isinstance(e, AssertionError):
# get the args of the original error, wrap into a value and throw a MailDeliveryException
# that is an except_orm, with name and value as arguments
value = '. '.join(e.args)
raise MailDeliveryException(_("Mail Delivery Failed"), value)
raise
if auto_commit is True:
cr.commit()
return True
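    # Minimal usage sketch (hypothetical ids; this mirrors what the
    # process_email_queue() scheduler entry point above does):
    #
    #   mail_pool = self.pool['mail.mail']
    #   ids = mail_pool.search(cr, uid, [('state', '=', 'outgoing')])
    #   mail_pool.send(cr, uid, ids, auto_commit=True)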
| agpl-3.0 |
thomazs/geraldo | site/newsite/site-geraldo/django/views/defaults.py | 24 | 3359 | from django.core.exceptions import ObjectDoesNotExist
from django.template import Context, RequestContext, loader
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django import http
def shortcut(request, content_type_id, object_id):
"Redirect to an object's page based on a content-type ID and an object ID."
# Look up the object, making sure it's got a get_absolute_url() function.
try:
content_type = ContentType.objects.get(pk=content_type_id)
obj = content_type.get_object_for_this_type(pk=object_id)
except ObjectDoesNotExist:
raise http.Http404, "Content type %s object %s doesn't exist" % (content_type_id, object_id)
try:
absurl = obj.get_absolute_url()
except AttributeError:
raise http.Http404, "%s objects don't have get_absolute_url() methods" % content_type.name
# Try to figure out the object's domain, so we can do a cross-site redirect
# if necessary.
# If the object actually defines a domain, we're done.
if absurl.startswith('http://') or absurl.startswith('https://'):
return http.HttpResponseRedirect(absurl)
object_domain = None
# Otherwise, we need to introspect the object's relationships for a
# relation to the Site object
opts = obj._meta
    # First, look for a many-to-many relationship to sites
for field in opts.many_to_many:
if field.rel.to is Site:
try:
object_domain = getattr(obj, field.name).all()[0].domain
except IndexError:
pass
if object_domain is not None:
break
    # Next, look for a many-to-one relationship to Site
if object_domain is None:
for field in obj._meta.fields:
if field.rel and field.rel.to is Site:
try:
object_domain = getattr(obj, field.name).domain
except Site.DoesNotExist:
pass
if object_domain is not None:
break
# Fall back to the current site (if possible)
if object_domain is None:
try:
object_domain = Site.objects.get_current().domain
except Site.DoesNotExist:
pass
# If all that malarkey found an object domain, use it; otherwise fall back
# to whatever get_absolute_url() returned.
if object_domain is not None:
protocol = request.is_secure() and 'https' or 'http'
return http.HttpResponseRedirect('%s://%s%s' % (protocol, object_domain, absurl))
else:
return http.HttpResponseRedirect(absurl)
def page_not_found(request, template_name='404.html'):
"""
Default 404 handler.
Templates: `404.html`
Context:
request_path
The path of the requested URL (e.g., '/app/pages/bad_page/')
"""
t = loader.get_template(template_name) # You need to create a 404.html template.
return http.HttpResponseNotFound(t.render(RequestContext(request, {'request_path': request.path})))
def server_error(request, template_name='500.html'):
"""
500 error handler.
Templates: `500.html`
Context: None
"""
t = loader.get_template(template_name) # You need to create a 500.html template.
return http.HttpResponseServerError(t.render(Context({})))
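# Usage sketch (assumed project wiring): Django picks these handlers up via the
# standard hooks in a project's root urls.py, e.g.:
#
#   handler404 = 'django.views.defaults.page_not_found'
#   handler500 = 'django.views.defaults.server_error'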
| lgpl-3.0 |
wrouesnel/ansible | lib/ansible/modules/cloud/vmware/vmware_migrate_vmk.py | 26 | 7038 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_migrate_vmk
short_description: Migrate a VMK interface from VSS to VDS
description:
- Migrate a VMK interface from VSS to VDS
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
esxi_hostname:
description:
- ESXi hostname to be managed
required: True
device:
description:
- VMK interface name
required: True
current_switch_name:
description:
- Switch VMK interface is currently on
required: True
current_portgroup_name:
description:
- Portgroup name VMK interface is currently on
required: True
migrate_switch_name:
description:
- Switch name to migrate VMK interface to
required: True
migrate_portgroup_name:
description:
- Portgroup name to migrate VMK interface to
required: True
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example from Ansible playbook
- name: Migrate Management vmk
local_action:
module: vmware_migrate_vmk
hostname: vcsa_host
username: vcsa_user
password: vcsa_pass
esxi_hostname: esxi_hostname
device: vmk1
current_switch_name: temp_vswitch
current_portgroup_name: esx-mgmt
migrate_switch_name: dvSwitch
migrate_portgroup_name: Management
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (vmware_argument_spec, find_dvs_by_name, find_hostsystem_by_name,
connect_to_api, find_dvspg_by_name)
class VMwareMigrateVmk(object):
def __init__(self, module):
self.module = module
self.host_system = None
self.migrate_switch_name = self.module.params['migrate_switch_name']
self.migrate_portgroup_name = self.module.params['migrate_portgroup_name']
self.device = self.module.params['device']
self.esxi_hostname = self.module.params['esxi_hostname']
self.current_portgroup_name = self.module.params['current_portgroup_name']
self.current_switch_name = self.module.params['current_switch_name']
self.content = connect_to_api(module)
def process_state(self):
try:
vmk_migration_states = {
'migrate_vss_vds': self.state_migrate_vss_vds,
'migrate_vds_vss': self.state_migrate_vds_vss,
'migrated': self.state_exit_unchanged
}
vmk_migration_states[self.check_vmk_current_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_migrate_vds_vss(self):
self.module.exit_json(changed=False, msg="Currently Not Implemented")
def create_host_vnic_config(self, dv_switch_uuid, portgroup_key):
host_vnic_config = vim.host.VirtualNic.Config()
host_vnic_config.spec = vim.host.VirtualNic.Specification()
host_vnic_config.changeOperation = "edit"
host_vnic_config.device = self.device
host_vnic_config.portgroup = ""
host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection()
host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid
host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key
return host_vnic_config
def create_port_group_config(self):
port_group_config = vim.host.PortGroup.Config()
port_group_config.spec = vim.host.PortGroup.Specification()
port_group_config.changeOperation = "remove"
port_group_config.spec.name = self.current_portgroup_name
port_group_config.spec.vlanId = -1
port_group_config.spec.vswitchName = self.current_switch_name
port_group_config.spec.policy = vim.host.NetworkPolicy()
return port_group_config
def state_migrate_vss_vds(self):
host_network_system = self.host_system.configManager.networkSystem
dv_switch = find_dvs_by_name(self.content, self.migrate_switch_name)
pg = find_dvspg_by_name(dv_switch, self.migrate_portgroup_name)
config = vim.host.NetworkConfig()
config.portgroup = [self.create_port_group_config()]
config.vnic = [self.create_host_vnic_config(dv_switch.uuid, pg.key)]
host_network_system.UpdateNetworkConfig(config, "modify")
self.module.exit_json(changed=True)
def check_vmk_current_state(self):
self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname)
for vnic in self.host_system.configManager.networkSystem.networkInfo.vnic:
if vnic.device == self.device:
# self.vnic = vnic
if vnic.spec.distributedVirtualPort is None:
if vnic.portgroup == self.current_portgroup_name:
return "migrate_vss_vds"
else:
dvs = find_dvs_by_name(self.content, self.current_switch_name)
if dvs is None:
return "migrated"
if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid:
return "migrate_vds_vss"
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'),
device=dict(required=True, type='str'),
current_switch_name=dict(required=True, type='str'),
current_portgroup_name=dict(required=True, type='str'),
migrate_switch_name=dict(required=True, type='str'),
migrate_portgroup_name=dict(required=True, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi required for this module')
vmware_migrate_vmk = VMwareMigrateVmk(module)
vmware_migrate_vmk.process_state()
if __name__ == '__main__':
main()
| gpl-3.0 |
adamgilman/ems-costing | tests/tests_postageapp.py | 1 | 1049 | from vendors import PostageApp
import unittest
class TestPostageApp(unittest.TestCase):
def setUp(self):
self.vendor = PostageApp()
def test_ZeroEmails(self):
self.assertEqual(self.vendor.getPrice(0), 9)
def test_Zero_10000(self):
self.assertEqual(self.vendor.getPrice(1), 9)
self.assertEqual(self.vendor.getPrice(10000), 9)
self.assertEqual(self.vendor.getPrice(11000), 10)
def test_40k_pm(self):
self.assertEqual(self.vendor.getPrice(40000), 29)
self.assertEqual(self.vendor.getPrice(41000), 30)
def test_100k_pm(self):
self.assertEqual(self.vendor.getPrice(100000), 79)
self.assertEqual(self.vendor.getPrice(101333), 80)
def test_400k_pm(self):
self.assertEqual(self.vendor.getPrice(400000), 199)
self.assertEqual(self.vendor.getPrice(401333), 200)
def test_500k_pm(self):
self.assertEqual(self.vendor.getPrice(500000), 274)
def test_700k_pm(self):
self.assertEqual(self.vendor.getPrice(700000), 424)
| mit |
pshahzeb/vsphere-storage-for-docker | esx_service/cli/vmdkops_admin.py | 1 | 58143 | #!/usr/bin/env python
# Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Admin CLI for vmdk_opsd
import argparse
import os
import subprocess
import sys
import signal
import os.path
import shutil
import time
import vmdk_ops
# vmdkops python utils are in PY_LOC, so add to path.
sys.path.insert(0, vmdk_ops.PY_LOC)
import volume_kv as kv
import cli_table
import cli_xml
import vsan_policy
import vmdk_utils
import vsan_info
import local_sh
import log_config
import auth
import auth_data_const
import convert
import auth_api
import auth_data
from auth_data import DB_REF
from error_code import ErrorCode
from error_code import error_code_to_message
from error_code import generate_error_info
# generic strings
NOT_AVAILABLE = 'N/A'
UNSET = "Unset"
# Volume attributes
VOL_SIZE = 'size'
VOL_ALLOC = 'allocated'
# Return this to shell
# note: "1" is returned if a string is passed to sys.exit
CLI_ERR_ARGS_PARSE = 3
CLI_ERR_OPERATION_FAILURE = 2
CLI_SUCCESS = 0
def main():
'Main function for Admin CLI'
log_config.configure()
kv.init()
if not vmdk_ops.is_service_available():
sys.exit('Unable to connect to the host-agent on this host, ensure the ESXi host agent is running before retrying.')
args = parse_args()
if not args:
sys.exit(CLI_ERR_ARGS_PARSE)
if args.func(args) != None:
sys.exit(CLI_ERR_OPERATION_FAILURE)
sys.exit(CLI_SUCCESS) # not really needed, putting here as an eye candy
def commands():
"""
This function returns a dictionary representation of a CLI specification that is used to
generate a CLI parser. The dictionary is recursively walked in the `add_subparser()` function
and appropriate calls are made to the `argparse` module to create a CLI parser that fits the
specification.
Each key in the top level of the dictionary is a command string. Each command may contain the
following keys:
* func - The callback function to be called when the command is issued. This key is always
present unless there are subcommands, denoted by a 'cmds' key.
    * help - The help string that is printed when the `-h` or `--help` parameters are given without
reference to a given command. (i.e. `./vmdkops_admin.py -h`). All top level help
strings are printed in this instance.
* args - A dictionary of any positional or optional arguments allowed for the given command. The
args dictionary may contain the following keys:
* help - The help for a given option which is displayed when the `-h` flag is given
            with reference to a given command. (i.e. `./vmdkops_admin.py volume ls -h`). Help for
all options are shown for the command.
* action - The action to take when the option is given. This is directly passed to
argparse. Note that `store_true` just means pass the option to the callback
as a boolean `True` value and don't require option parameters.
(i.e. `./vmdkops_admin.py volume ls -l`). Other options for the action value can be
found in the argparse documentation.
https://docs.python.org/3/library/argparse.html#action
* metavar - A way to refer to each expected argument in help documentation. This is
directly passed to argparse.
See https://docs.python.org/3/library/argparse.html#metavar
* required - Whether or not the argument is required. This is directly passed to
argparse.
* type - A type conversion function that takes the option parameter and converts it
to a given type before passing it to the func callback. It prints an error and
exits if the given argument cannot be converted.
See https://docs.python.org/3/library/argparse.html#type
* choices - A list of choices that can be provided for the given option. This list is
not directly passed to argparse. Instead a type conversion function is
created that only allows one or more of the choices as a comma separated
list to be supplied. An error identical to the one presented when using the
'choices' option in argparse is printed if an invalid choice is given. The
rationale for not directly using the argparse choices option is that
argparse requires space separated arguments of the form: `-l a b c`, rather
than the defacto single argument, comma separated form: `-l a,b,c`, common
to most unix programs.
* cmds - A dictionary of subcommands where the key is the next word in the command line string.
For example, in `vmdkops_admin.py tenant create`, `tenant` is the command, and `create` is
the subcommand. Subcommands can have further subcommands, but currently there is only
one level of subcommands in this specification. Each subcommand can contain the same
attributes as top level commands: (func, help, args, cmds). These attributes have
identical usage to the top-level keys, except they only apply when the subcommand is
part of the command. For example the `--vm-list` argument only applies to `tenant
create` or `tenant set` commands. It will be invalid in any other context.
Note that the last subcommand in a chain is the one where the callback function is
defined. For example, `tenant create` has a callback, but if a user runs the program
like: `./vmdkops_admin.py tenant` they will get the following error:
```
usage: vmdkops_admin.py tenant [-h] {rm,create,volume,get} ...
vmdkops_admin.py tenant: error: too few arguments
```
"""
return {
'volume' : {
'help': "Manipulate volumes",
'cmds': {
'ls': {
'func': ls,
'help': 'List volumes',
'args': {
'-c': {
'help': 'Display selected columns',
'choices': ['volume', 'datastore', 'vmgroup', 'capacity', 'used',
'fstype', 'policy', 'disk-format', 'attached-to', 'access',
'attach-as', 'created-by', 'created'],
'metavar': 'Col1,Col2,...'
},
'--vmgroup' : {
'help': 'Displays volumes for a given vmgroup'
}
}
},
'set': {
'func': set_vol_opts,
'help': 'Edit settings for a given volume',
'args': {
'--volume': {
'help': 'Volume to set options for, specified as "volume@datastore".',
'required': True
},
'--vmgroup': {
'help': 'Name of the vmgroup the volume belongs to.',
'required': True
},
'--options': {
'help': 'Options (specifically, access) to be set on the volume.',
'required': True
}
}
}
}
},
'policy': {
'help': 'Configure and display storage policy information',
'cmds': {
'create': {
'func': policy_create,
'help': 'Create a storage policy',
'args': {
'--name': {
'help': 'The name of the policy',
'required': True
},
'--content': {
'help': 'The VSAN policy string',
'required': True
}
}
},
'rm': {
'func': policy_rm,
'help': 'Remove a storage policy',
'args': {
'--name': {
'help': 'Policy name',
'required': True
}
}
},
'ls': {
'func': policy_ls,
'help':
'List storage policies and volumes using those policies'
},
'update': {
'func': policy_update,
'help': ('Update the definition of a storage policy and all'
'VSAN objects using that policy'),
'args': {
'--name': {
'help': 'The name of the policy',
'required': True
},
'--content': {
'help': 'The VSAN policy string',
'required': True
}
}
}
}
},
'vmgroup': {
#
# vmgroup {create, update, rm , ls} - manipulates vmgroup
# vmgroup vm {add, rm, ls} - manipulates VMs for a vmgroup
# vmgroup access {add, set, rm, ls} - manipulates datastore access right for a vmgroup
#
# Internally, "vmgroup" is called "tenant".
# We decided to keep the name of functions as "tenant_*" for now
'help': 'Administer and monitor volume access control',
'cmds': {
'create': {
'func': tenant_create,
'help': 'Create a new vmgroup',
'args': {
'--name': {
'help': 'The name of the vmgroup',
'required': True
},
'--description': {
'help': 'The description of the vmgroup',
},
# a shortcut allowing to add VMs on vmgroup Create
'--vm-list': {
'help': 'A list of VM names to place in this vmgroup',
'metavar': 'vm1, vm2, ...',
'type': comma_separated_string
},
'--default-datastore': {
'help': 'Datastore to be used by default for volumes placement',
'required': True
}
}
},
'update': {
'func': tenant_update,
'help': 'Update an existing vmgroup',
'args': {
'--name': {
'help': 'The name of the vmgroup',
'required': True
},
'--new-name': {
'help': 'The new name of the vmgroup',
},
'--description': {
'help': 'The new description of the vmgroup',
},
'--default-datastore': {
'help': 'Datastore to be used by default for volumes placement',
}
}
},
'rm': {
'func': tenant_rm,
'help': 'Delete a vmgroup',
'args': {
'--name': {
'help': 'The name of the vmgroup',
'required': True
},
'--remove-volumes': {
'help': 'BE CAREFUL: Removes this vmgroup volumes when removing a vmgroup',
'action': 'store_true'
},
'--force': {
'help': 'Force operation, ignore warnings',
'action': 'store_true'
}
}
},
'ls': {
'func': tenant_ls,
'help': 'List vmgroups and the VMs they are applied to'
},
'vm': {
'help': 'Add, removes and lists VMs in a vmgroup',
'cmds': {
'add': {
'help': 'Add a VM(s) to a vmgroup',
'func': tenant_vm_add,
'args': {
'--name': {
'help': "Vmgroup to add the VM to",
'required': True
},
'--vm-list': {
'help': "A list of VM names to add to this vmgroup",
'type': comma_separated_string,
'required': True
}
}
},
'rm': {
'help': 'Remove VM(s) from a vmgroup',
'func': tenant_vm_rm,
'args': {
'--name': {
'help': "Vmgroup to remove the VM from",
'required': True
},
'--vm-list': {
'help': "A list of VM names to rm from this vmgroup",
'type': comma_separated_string,
'required': True
}
}
},
'replace': {
'help': 'Replace VM(s) for a vmgroup',
'func': tenant_vm_replace,
'args': {
'--name': {
'help': "Vmgroup to replace the VM for",
'required': True
},
'--vm-list': {
'help': "A list of VM names to replace for this vmgroup",
'type': comma_separated_string,
'required': True
}
}
},
'ls': {
'help': "list VMs in a vmgroup",
'func': tenant_vm_ls,
'args': {
'--name': {
'help': "Vmgroup to list the VMs for",
'required': True
}
}
}
}
},
'access': {
'help': 'Add or remove Datastore access and quotas for a vmgroup',
'cmds': {
'add': {
'func': tenant_access_add,
'help': 'Add a datastore access for a vmgroup',
'args': {
'--name': {
'help': 'The name of the vmgroup',
'required': True
},
'--datastore': {
'help': "Datastore which access is controlled",
'required': True
},
'--allow-create': {
'help': 'Allow create and delete on datastore if set',
'action': 'store_true'
},
'--volume-maxsize': {
'help': 'Maximum size of the volume that can be created',
'metavar': 'Num{MB,GB,TB} - e.g. 2TB'
},
'--volume-totalsize': {
'help':
                                    'Maximum total size of all volumes that can be created on the datastore for this vmgroup',
'metavar': 'Num{MB,GB,TB} - e.g. 2TB'
}
}
},
'set': {
'func': tenant_access_set,
'help': 'Modify datastore access for a vmgroup',
'args': {
'--name': {
'help': 'The name of the vmgroup',
'required': True
},
'--datastore': {
'help': "Datastore name",
'required': True
},
'--allow-create': {
'help':
'Allow create and delete on datastore if set to True; disallow create and delete on datastore if set to False',
'metavar': 'Value{True|False} - e.g. True'
},
'--volume-maxsize': {
'help': 'Maximum size of the volume that can be created',
'metavar': 'Num{MB,GB,TB} - e.g. 2TB'
},
'--volume-totalsize': {
'help':
                                    'Maximum total size of all volumes that can be created on the datastore for this vmgroup',
'metavar': 'Num{MB,GB,TB} - e.g. 2TB'
}
}
},
'rm': {
'func': tenant_access_rm,
'help': "Remove all access to a datastore for a vmgroup",
'args': {
'--name': {
'help': 'The name of the vmgroup',
'required': True
},
'--datastore': {
'help': "Datstore which access is controlled",
'required': True
}
}
},
'ls': {
'func': tenant_access_ls,
'help': 'List all access info for a vmgroup',
'args': {
'--name': {
'help': 'The name of the vmgroup',
'required': True
}
}
}
}
}
}
},
'config': {
'help': 'Init and manage Config DB to enable quotas and access control [EXPERIMENTAL]',
'cmds': {
'init': {
'func': config_init,
                    'help': 'Init ' + DB_REF + ' to allow quotas and access groups, aka vmgroups',
'args': {
'--datastore': {
'help': DB_REF + ' will be placed on a shared datastore',
},
'--local': {
'help': 'Allows local (SingleNode) Init',
'action': 'store_true'
},
'--force': {
'help': 'Force operation, ignore warnings',
'action': 'store_true'
}
}
},
'rm': {
'func': config_rm,
'help': 'Remove ' + DB_REF,
'args': {
'--local': {
'help': 'Remove only local link or local DB',
'action': 'store_true'
},
'--unlink': {
'help': 'Remove the local link to shared DB',
'action': 'store_true'
},
'--no-backup': {
'help': 'Do not create DB backup before removing',
'action': 'store_true'
},
'--confirm': {
'help': 'Explicitly confirm the operation',
'action': 'store_true'
}
}
},
'mv': {
'func': config_mv,
'help': 'Relocate ' + DB_REF + ' from its current location [NOT SUPPORTED YET]',
'args': {
'--force': {
'help': 'Force operation, ignore warnings',
'action': 'store_true'
},
'--to': {
'help': 'Where to move the DB to.',
'required': True
}
}
},
'status': {
'func': config_status,
'help': 'Show the status of the Config DB'
}
}
},
'status': {
'func': status,
'help': 'Show the status of the vmdk_ops service',
'args': {
'--fast': {
'help': 'Skip some of the data collection (port, version)',
'action': 'store_true'
}
}
}
}
def printList(output_format, header, rows):
"""
Prints the output generated from header and rows
in specified format
"""
if output_format == "xml":
print(cli_xml.create(header, rows))
else:
print(cli_table.create(header, rows))
def printMessage(output_format, message):
"""
Prints the message in specified output format
"""
if output_format == "xml":
print(cli_xml.createMessage(message))
else:
print(message)
def create_parser():
""" Create a CLI parser via argparse based on the dictionary returned from commands() """
parser = argparse.ArgumentParser(description='vSphere Docker Volume Service admin CLI')
    parser.add_argument('--output-format', help='Specify output format. Supported format: xml; default is plain text.')
add_subparser(parser, commands(), title='Manage VMDK-based Volumes for Docker')
return parser
def add_subparser(parser, cmds_dict, title="", description=""):
""" Recursively add subcommand parsers based on a dictionary of commands """
subparsers = parser.add_subparsers(title=title, description=description, help="action")
for cmd, attributes in cmds_dict.items():
subparser = subparsers.add_parser(cmd, help=attributes['help'])
if 'func' in attributes:
subparser.set_defaults(func=attributes['func'])
if 'args' in attributes:
for arg, opts in attributes['args'].items():
opts = build_argparse_opts(opts)
subparser.add_argument(arg, **opts)
if 'cmds' in attributes:
add_subparser(subparser, attributes['cmds'], title=attributes['help'])
def build_argparse_opts(opts):
if 'choices' in opts:
opts['type'] = make_list_of_values(opts['choices'])
help_opts = opts['help']
opts['help'] = '{0}: Choices = {1}'.format(help_opts, opts['choices'])
del opts['choices']
return opts
def parse_args():
parser = create_parser()
args = parser.parse_args()
opts = vars(args)
if args != argparse.Namespace() and 'func' in opts.keys():
return args
else:
parser.print_help()
def comma_separated_string(string):
return string.split(',')
def make_list_of_values(allowed):
"""
Take a list of allowed values for an option and return a function that can be
used to typecheck a string of given values and ensure they match the allowed
values. This is required to support options that take comma separated lists
such as --rights in 'tenant set --rights=create,delete,mount'
"""
def list_of_values(string):
given = string.split(',')
for g in given:
if g not in allowed:
msg = (
'invalid choices: {0} (choices must be a comma separated list of '
'only the following words \n {1}. '
'No spaces are allowed between choices.)').format(g, repr(allowed).replace(' ', ''))
raise argparse.ArgumentTypeError(msg)
return given
return list_of_values
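# Illustrative sketch of the generated checker (hypothetical choices):
#
#   parse = make_list_of_values(['a', 'b', 'c'])
#   parse('a,c')   # -> ['a', 'c']
#   parse('a,x')   # -> raises argparse.ArgumentTypeError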
def ls(args):
"""
Print a table of all volumes and their datastores when called with no args.
    If args.c is not empty, display only the columns given in args.c;
    otherwise show all metadata columns.
"""
tenant_reg = '*'
if args.vmgroup:
tenant_reg = args.vmgroup
if args.c:
(header, rows) = ls_dash_c(args.c, tenant_reg)
else:
header = all_ls_headers()
rows = generate_ls_rows(tenant_reg)
printList(args.output_format, header, rows)
def ls_dash_c(columns, tenant_reg):
""" Return only the columns requested in the format required for table construction """
all_headers = all_ls_headers()
all_rows = generate_ls_rows(tenant_reg)
indexes = []
headers = []
choices = commands()['volume']['cmds']['ls']['args']['-c']['choices']
for i, choice in enumerate(choices):
if choice in columns:
indexes.append(i)
headers.append(all_headers[i])
rows = []
for row in all_rows:
rows.append([row[i] for i in indexes])
return (headers, rows)
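# e.g. ls_dash_c(['volume', 'datastore'], '*') returns (['Volume', 'Datastore'],
# rows) with each row trimmed to those two columns, preserving the column order
# declared in commands().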
def all_ls_headers():
""" Return a list of all header for ls -l """
return ['Volume', 'Datastore', 'VMGroup', 'Capacity', 'Used', 'Filesystem', 'Policy',
'Disk Format', 'Attached-to', 'Access', 'Attach-as', 'Created By', 'Created Date']
def generate_ls_rows(tenant_reg):
""" Gather all volume metadata into rows that can be used to format a table """
rows = []
for v in vmdk_utils.get_volumes(tenant_reg):
if 'tenant' not in v or v['tenant'] == auth_data_const.ORPHAN_TENANT:
tenant = 'N/A'
else:
tenant = v['tenant']
path = os.path.join(v['path'], v['filename'])
name = vmdk_utils.strip_vmdk_extension(v['filename'])
metadata = get_metadata(path)
attached_to = get_attached_to(metadata)
policy = get_policy(metadata, path)
size_info = get_vmdk_size_info(path)
created, created_by = get_creation_info(metadata)
diskformat = get_diskformat(metadata)
fstype = get_fstype(metadata)
access = get_access(metadata)
attach_as = get_attach_as(metadata)
rows.append([name, v['datastore'], tenant, size_info['capacity'], size_info['used'], fstype, policy,
diskformat, attached_to, access, attach_as, created_by, created])
return rows
def get_creation_info(metadata):
"""
Return the creation time and creation vm for a volume given its metadata
"""
try:
return (metadata[kv.CREATED], metadata[kv.CREATED_BY])
except:
return (NOT_AVAILABLE, NOT_AVAILABLE)
def get_attached_to(metadata):
""" Return which VM a volume is attached to based on its metadata. """
try:
if kv.ATTACHED_VM_UUID in metadata:
vm_name = vmdk_ops.vm_uuid2name(metadata[kv.ATTACHED_VM_UUID])
if vm_name:
return vm_name
# If vm name couldn't be retrieved through uuid, use name from KV
elif kv.ATTACHED_VM_NAME in metadata:
return metadata[kv.ATTACHED_VM_NAME]
else:
return metadata[kv.ATTACHED_VM_UUID]
else:
return kv.DETACHED
except:
return kv.DETACHED
def get_attach_as(metadata):
""" Return which mode a volume is attached as based on its metadata """
try:
return metadata[kv.VOL_OPTS][kv.ATTACH_AS]
except:
return kv.DEFAULT_ATTACH_AS
def get_access(metadata):
""" Return the access mode of a volume based on its metadata """
try:
return metadata[kv.VOL_OPTS][kv.ACCESS]
except:
return kv.DEFAULT_ACCESS
def get_policy(metadata, path):
""" Return the policy for a volume given its volume options """
try:
return metadata[kv.VOL_OPTS][kv.VSAN_POLICY_NAME]
except:
pass
if vsan_info.is_on_vsan(path):
return kv.DEFAULT_VSAN_POLICY
else:
return NOT_AVAILABLE
def get_diskformat(metadata):
""" Return the Disk Format of the volume based on its metadata """
try:
return metadata[kv.VOL_OPTS][kv.DISK_ALLOCATION_FORMAT]
except:
return NOT_AVAILABLE
def get_fstype(metadata):
""" Return the Filesystem Type of the volume based on its metadata """
try:
return metadata[kv.VOL_OPTS][kv.FILESYSTEM_TYPE]
except:
return NOT_AVAILABLE
def get_metadata(volPath):
""" Take the absolute path to volume vmdk and return its metadata as a dict """
return kv.getAll(volPath)
def get_vmdk_size_info(path):
"""
Get the capacity and used space for a given VMDK given its absolute path.
Values are returned as strings in human readable form (e.g. 10MB)
Using get_vol_info api from volume kv. The info returned by this
api is in human readable form
"""
try:
vol_info = kv.get_vol_info(path)
if not vol_info: # race: volume is already gone
return {'capacity': NOT_AVAILABLE,
'used': NOT_AVAILABLE}
return {'capacity': vol_info[VOL_SIZE],
'used': vol_info[VOL_ALLOC]}
except subprocess.CalledProcessError:
sys.exit("Failed to retrieve volume info for {0}.".format(path) \
+ " VMDK corrupted. Please remove and then retry")
KB = 1024
MB = 1024*KB
GB = 1024*MB
TB = 1024*GB
def human_readable(size_in_bytes):
"""
Take an integer size in bytes and convert it to MB, GB, or TB depending
upon size.
"""
if size_in_bytes >= TB:
return '{:.2f}TB'.format(size_in_bytes/TB)
if size_in_bytes >= GB:
return '{:.2f}GB'.format(size_in_bytes/GB)
if size_in_bytes >= MB:
return '{:.2f}MB'.format(size_in_bytes/MB)
if size_in_bytes >= KB:
return '{:.2f}KB'.format(size_in_bytes/KB)
return '{0}B'.format(size_in_bytes)
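# Examples:
#   human_readable(512)             # -> '512B'
#   human_readable(10 * MB)         # -> '10.00MB'
#   human_readable(int(2.5 * TB))   # -> '2.50TB'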
def policy_create(args):
output = vsan_policy.create(args.name, args.content)
if output:
return err_out(output)
else:
printMessage(args.output_format, 'Successfully created policy: {0}'.format(args.name))
def policy_rm(args):
output = vsan_policy.delete(args.name)
if output:
return err_out(output)
else:
printMessage(args.output_format, 'Successfully removed policy: {0}'.format(args.name))
def policy_ls(args):
volumes = vsan_policy.list_volumes_and_policies()
policies = vsan_policy.get_policies()
header = ['Policy Name', 'Policy Content', 'Active']
rows = []
used_policies = {}
for v in volumes:
policy_name = v['policy']
if policy_name in used_policies:
used_policies[policy_name] = used_policies[policy_name] + 1
else:
used_policies[policy_name] = 1
for name, content in policies.items():
if name in used_policies:
active = 'In use by {0} volumes'.format(used_policies[name])
else:
active = 'Unused'
rows.append([name, content.strip(), active])
printList(args.output_format, header, rows)
def policy_update(args):
output = vsan_policy.update(args.name, args.content)
if output:
return err_out(output)
else:
printMessage(args.output_format, 'Successfully updated policy {0}'.format(args.name))
def status(args):
"""Prints misc. status information. Returns an array of 1 element dicts"""
result = []
# version is extracted from localcli... slow...
result.append({"=== Service": ""})
version = "?" if args.fast else str(get_version())
result.append({"Version": version})
(service_status, pid) = get_service_status()
result.append({"Status": str(service_status)})
if pid:
result.append({"Pid": str(pid)})
port = "?" if args.fast else str(get_listening_port(pid))
result.append({"Port": port})
result.append({"LogConfigFile": log_config.LOG_CONFIG_FILE})
result.append({"LogFile": log_config.LOG_FILE})
result.append({"LogLevel": log_config.get_log_level()})
result.append({"=== Authorization Config DB": ""})
result += config_db_get_status()
output_list = []
for r in result:
output_list.append("{}: {}".format(list(r.keys())[0], list(r.values())[0]))
printMessage(args.output_format,"\n".join(output_list))
return None
def set_vol_opts(args):
try:
set_ok = vmdk_ops.set_vol_opts(args.volume, args.vmgroup, args.options)
if set_ok:
printMessage(args.output_format, 'Successfully updated settings for {0}'.format(args.volume))
else:
return err_out('Failed to update {0} for {1}.'.format(args.options, args.volume))
except Exception as ex:
return err_out('Failed to update {0} for {1} - {2}.'.format(args.options,
args.volume,
str(ex)))
VMDK_OPSD = '/etc/init.d/vmdk-opsd'
PS = 'ps -c | grep '
GREP_V_GREP = ' | grep -v grep'
NOT_RUNNING_STATUS = ("Stopped", None)
def get_service_status():
"""
Determine whether the service is running and it's PID. Return the 2 tuple
containing a status string and PID. If the service is not running, PID is
None
"""
try:
output = subprocess.check_output([VMDK_OPSD, "status"]).split()
if output[2] == "not":
return NOT_RUNNING_STATUS
pidstr = output[3]
pidstr = pidstr.decode('utf-8')
pid = pidstr.split("=")[1]
return ("Running", pid)
except subprocess.CalledProcessError:
return NOT_RUNNING_STATUS
def get_listening_port(pid):
""" Return the configured port that the service is listening on """
try:
cmd = "{0}{1}{2}".format(PS, pid, GREP_V_GREP)
output = subprocess.check_output(cmd, shell=True).split()[6]
return output.decode('utf-8')
except:
return NOT_AVAILABLE
def get_version():
""" Return the version of the installed VIB """
try:
cmd = 'localcli software vib list | grep esx-vmdkops-service'
version_str = subprocess.check_output(cmd, shell=True).split()[1]
return version_str.decode('utf-8')
except:
return NOT_AVAILABLE
def tenant_ls_headers():
""" Return column names for tenant ls command """
headers = ['Uuid', 'Name', 'Description', 'Default_datastore', 'VM_list']
return headers
def generate_vm_list(vm_list):
""" Generate vm names with given list of (vm_uuid, vm_name) from db"""
# vm_list is a list of (vm_uuid, vm_name) from db
# the return value is a comma separated string of VM names like this vm1,vm2
res = ""
for vm_uuid, vm_name_from_db in vm_list:
vm_name = vmdk_utils.get_vm_name_by_uuid(vm_uuid)
# If the VM name cannot be resolved then use one from db
# If it is not available from db then mark it as NOT_AVAILABLE
if not vm_name:
vm_name = vm_name_from_db if vm_name_from_db else NOT_AVAILABLE
res += vm_name + ","
if res:
res = res[:-1]
return res
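# e.g. generate_vm_list([(uuid1, 'vm1'), (uuid2, 'vm2')]) -> 'vm1,vm2'
# (hypothetical uuids; assumes name resolution falls back to the db-stored names)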
def generate_tenant_ls_rows(tenant_list):
""" Generate output for tenant ls command """
rows = []
for tenant in tenant_list:
uuid = tenant.id
name = tenant.name
description = tenant.description
# "default_datastore_url" should always be set, and cannot be empty
# it can only happen when DB has some corruption
if not tenant.default_datastore_url:
default_datastore = ""
error_info = generate_error_info(ErrorCode.DS_DEFAULT_NOT_SET, name)
return error_info, None
else:
default_datastore = vmdk_utils.get_datastore_name(tenant.default_datastore_url)
if default_datastore is None:
default_datastore = ""
vm_list = generate_vm_list(tenant.vms)
rows.append([uuid, name, description, default_datastore, vm_list])
return None, rows
def tenant_create(args):
""" Handle tenant create command """
desc = ""
if args.description:
desc = args.description
error_info, tenant = auth_api._tenant_create(name=args.name,
default_datastore=args.default_datastore,
description=desc,
vm_list=args.vm_list,
privileges=[])
if error_info:
return err_out(error_info.msg)
elif args.name != auth_data_const.DEFAULT_TENANT:
printMessage(args.output_format, "vmgroup '{}' is created. Do not forget to run 'vmgroup vm add' to add vm to vmgroup.".format(args.name))
else:
printMessage(args.output_format, "vmgroup '{}' is created.".format(args.name))
def tenant_update(args):
""" Handle tenant update command """
desc = ""
if args.description:
desc = args.description
error_info = auth_api._tenant_update(name=args.name,
new_name=args.new_name,
description=desc,
default_datastore=args.default_datastore)
if error_info:
return err_out(error_info.msg)
else:
printMessage(args.output_format, "vmgroup modify succeeded")
def tenant_rm(args):
""" Handle tenant rm command """
remove_volumes = False
# If args "remove_volumes" is not specified in CLI
# args.remove_volumes will be None
if args.remove_volumes:
remove_volumes = True
error_info = auth_api._tenant_rm(args.name, remove_volumes, args.force)
if error_info:
return err_out(error_info.msg)
else:
msg = "vmgroup rm succeeded"
printMessage(args.output_format, "All Volumes will be removed. " + msg if remove_volumes else msg)
def tenant_ls(args):
""" Handle tenant ls command """
error_info, tenant_list = auth_api._tenant_ls()
if error_info:
return err_out(error_info.msg)
header = tenant_ls_headers()
error_info, rows = generate_tenant_ls_rows(tenant_list)
if error_info:
return err_out(error_info.msg)
else:
printList(args.output_format, header, rows)
def tenant_vm_add(args):
""" Handle tenant vm add command """
error_info = auth_api._tenant_vm_add(args.name, args.vm_list)
if error_info:
return err_out(error_info.msg)
else:
printMessage(args.output_format, "vmgroup vm add succeeded")
def tenant_vm_rm(args):
""" Handle tenant vm rm command """
error_info = auth_api._tenant_vm_rm(args.name, args.vm_list)
if error_info:
return err_out(error_info.msg)
else:
printMessage(args.output_format, "vmgroup vm rm succeeded")
def tenant_vm_replace(args):
""" Handle tenant vm replace command """
error_info = auth_api._tenant_vm_replace(args.name, args.vm_list)
if error_info:
return err_out(error_info.msg)
else:
printMessage(args.output_format, "vmgroup vm replace succeeded")
def tenant_vm_ls_headers():
""" Return column names for tenant vm ls command """
headers = ['Uuid', 'Name']
return headers
def generate_tenant_vm_ls_rows(vms):
""" Generate output for tenant vm ls command """
rows = []
for vm_uuid, vm_name_from_db in vms:
vm_name = vmdk_utils.get_vm_name_by_uuid(vm_uuid)
# If the VM name cannot be resolved then use one from db
# If it is not available from db then mark it as NOT_AVAILABLE
if not vm_name:
vm_name = vm_name_from_db if vm_name_from_db else NOT_AVAILABLE
rows.append([vm_uuid, vm_name])
return rows
def tenant_vm_ls(args):
""" Handle tenant vm ls command """
# Handling _DEFAULT tenant case separately to print info message
# instead of printing empty list
if (args.name == auth_data_const.DEFAULT_TENANT):
return err_out("{0} tenant contains all VMs which were not added to other tenants".format(auth_data_const.DEFAULT_TENANT))
error_info, vms = auth_api._tenant_vm_ls(args.name)
if error_info:
return err_out(error_info.msg)
header = tenant_vm_ls_headers()
rows = generate_tenant_vm_ls_rows(vms)
printList(args.output_format, header, rows)
def tenant_access_add(args):
""" Handle tenant access command """
volume_maxsize_in_MB = None
volume_totalsize_in_MB = None
if args.volume_maxsize:
volume_maxsize_in_MB = convert.convert_to_MB(args.volume_maxsize)
if args.volume_totalsize:
volume_totalsize_in_MB = convert.convert_to_MB(args.volume_totalsize)
error_info = auth_api._tenant_access_add(name=args.name,
datastore=args.datastore,
allow_create=args.allow_create,
volume_maxsize_in_MB=volume_maxsize_in_MB,
volume_totalsize_in_MB=volume_totalsize_in_MB
)
if error_info:
return err_out(error_info.msg)
else:
printMessage(args.output_format, "vmgroup access add succeeded")
def tenant_access_set(args):
""" Handle tenant access set command """
volume_maxsize_in_MB = None
volume_totalsize_in_MB = None
if args.volume_maxsize:
volume_maxsize_in_MB = convert.convert_to_MB(args.volume_maxsize)
if args.volume_totalsize:
volume_totalsize_in_MB = convert.convert_to_MB(args.volume_totalsize)
error_info = auth_api._tenant_access_set(name=args.name,
datastore=args.datastore,
allow_create=args.allow_create,
volume_maxsize_in_MB=volume_maxsize_in_MB,
volume_totalsize_in_MB=volume_totalsize_in_MB)
if error_info:
return err_out(error_info.msg)
else:
printMessage(args.output_format, "vmgroup access set succeeded")
def tenant_access_rm(args):
""" Handle tenant access rm command """
error_info = auth_api._tenant_access_rm(args.name, args.datastore)
if error_info:
return err_out(error_info.msg)
else:
printMessage(args.output_format, "vmgroup access rm succeeded")
def tenant_access_ls_headers():
""" Return column names for tenant access ls command """
headers = ['Datastore', 'Allow_create', 'Max_volume_size', 'Total_size']
return headers
def generate_tenant_access_ls_rows(privileges, name):
""" Generate output for tenant access ls command """
rows = []
for p in privileges:
if not p.datastore_url:
datastore = ""
error_info = generate_error_info(ErrorCode.DS_DEFAULT_NOT_SET, name)
return error_info, None
else:
datastore = vmdk_utils.get_datastore_name(p.datastore_url)
if datastore is None:
datastore = ""
allow_create = ("False", "True")[p.allow_create]
# p[auth_data_const.COL_MAX_VOLUME_SIZE] is max_volume_size in MB
max_vol_size = UNSET if p.max_volume_size == 0 else human_readable(p.max_volume_size * MB)
# p[auth_data_const.COL_USAGE_QUOTA] is total_size in MB
total_size = UNSET if p.usage_quota == 0 else human_readable(p.usage_quota * MB)
rows.append([datastore, allow_create, max_vol_size, total_size])
return None, rows
def tenant_access_ls(args):
""" Handle tenant access ls command """
name = args.name
error_info, privileges = auth_api._tenant_access_ls(name)
if error_info:
return err_out(error_info.msg)
header = tenant_access_ls_headers()
error_info, rows = generate_tenant_access_ls_rows(privileges, name)
if error_info:
return err_out(error_info.msg)
else:
printList(args.output_format, header, rows)
# ==== CONFIG DB manipulation functions ====
def create_db_symlink(path, link_path):
"""Force-creates a symlink to path"""
if os.path.islink(link_path):
os.remove(link_path)
try:
os.symlink(path, link_path)
except Exception as ex:
print("Failed to create symlink at {} to {}".format(link_path, path))
sys.exit(ex)
def db_move_to_backup(path):
"""
    Moves the DB aside to a uniquely named backup file (a glorified rename).
    Returns the target name.
"""
target = "{}.bak_{}".format(path, time.asctime().replace(" ", "_"))
    # since we generate a unique file name, no need to check if it exists
shutil.move(path, target)
return target
def is_local_vmfs(datastore_name):
"""return True if datastore is local VMFS one"""
# TODO - check for datastore being on local VMFS volume.
# the code below is supposed to do it, but in ESX 6.5 it returns
# " local = <unset>", so leaving it out for now
# def vol_info_from_vim(datastore_name):
# si = pyVim.connect.Connect()
# host = pyVim.host.GetHostSystem(si)
# fss = host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo
# vmfs_volume_info = [f.volume for f in fss if f.volume.name == datastore_name and
# f.volume.type == "VMFS"]
# return vmfs_volume_info and vmfs_volume_info.local
return False
def err_out(_msg, _info=None):
"""
A helper to print an error message with (optional) info if the vmdkops admin command fails.
Returns the message.
"""
_msg = ("ERROR:" + _msg)
if _info:
_msg = _msg + (". Additional information: {}".format(_info))
print(_msg)
return _msg
def err_override(_msg, _info):
"""A helper to print messages with extra help about --force flag"""
new_msg = "{}".format(_msg) + " . Add '--force' flag to force the request execution"
return err_out(new_msg, _info)
def config_elsewhere(datastore):
"""Returns a list of config DBs info on other datastore, or empty list"""
# Actual implementation: scan vim datastores, check for dockvols/file_name
# return None or list of (db_name, full_path) tuples for existing config DBs.
others = []
for (ds_name, _, dockvol_path) in vmdk_utils.get_datastores():
full_path = os.path.join(dockvol_path, auth_data.CONFIG_DB_NAME)
if ds_name != datastore and os.path.exists(full_path):
others.append((ds_name, full_path))
return others
def check_ds_local_args(args):
"""
    checks consistency of the '--local' and '--datastore' args, and datastore presence
:Return: None for success, errmsg for error
"""
if args.datastore:
ds_name = args.datastore
if not os.path.exists(os.path.join("/vmfs/volumes", ds_name)):
return err_out("No such datastore: {}".format(ds_name))
if args.datastore and args.local:
return err_out("Error: only one of '--datastore' or '--local' can be set")
if not args.datastore and not args.local:
return err_out("Error: one of '--datastore' or '--local' have to be set")
return None
def config_init(args):
"""
    Init Config DB to allow quotas and access groups (vmgroups)
:return: None for success, string for error
"""
err = check_ds_local_args(args)
if err:
return err
output_list = []
output_list.append("Warning: this feature is EXPERIMENTAL")
if args.datastore:
ds_name = args.datastore
db_path = auth_data.AuthorizationDataManager.ds_to_db_path(ds_name)
else:
db_path = auth_data.AUTH_DB_PATH
    link_path = auth_data.AUTH_DB_PATH  # where the DB used to be; now it is a link
# Check the existing config mode
with auth_data.AuthorizationDataManager() as auth:
try:
auth.connect()
info = auth.get_info()
mode = auth.mode # for usage outside of the 'with'
except auth_data.DbAccessError as ex:
return err_out(str(ex))
if mode == auth_data.DBMode.NotConfigured:
pass
elif mode == auth_data.DBMode.MultiNode or mode == auth_data.DBMode.SingleNode:
return err_out(DB_REF + " is already initialized. Use 'rm --local' or 'rm --unlink' to reset", info)
else:
return err_out("Fatal: Internal error - unknown mode: {}".format(mode))
if args.datastore:
        # Check that the target datastore is NOT local VMFS, bail out if it is (--force to override).
if is_local_vmfs(ds_name) and not args.force:
return err_override("{} is a local datastore.".format(ds_name) +
"Shared datastores are recommended.", "N/A")
# Check other datastores, bail out if dockvols/DB exists there.
other_ds_config = config_elsewhere(ds_name)
if len(other_ds_config) > 0 and not args.force:
return err_override("Found " + DB_REF + "on other datastores.",
other_ds_config)
if not os.path.exists(db_path):
output_list.append("Creating new DB at {}".format(db_path))
auth = auth_data.AuthorizationDataManager(db_path)
err = auth.new_db()
if err:
return err_out("Init failed: %s" % str(err))
# Almost done - just create link and refresh the service
if args.local:
output_list.append("Warning: Local configuration will not survive ESXi reboot." +
" See KB2043564 for details")
else:
output_list.append("Creating a symlink to {} at {}".format(db_path, link_path))
create_db_symlink(db_path, link_path)
output_list.append("Updating {}".format(local_sh.LOCAL_SH_PATH))
local_sh.update_symlink_info(args.datastore)
printMessage(args.output_format, "\n".join(output_list))
return None
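# Illustrative 'config init' invocations (the datastore name is hypothetical;
# the flags are the ones handled above):
#   vmdkops_admin config init --datastore=shared_ds   # shared DB placed on a datastore
#   vmdkops_admin config init --local                 # single-node, local DB
# Add '--force' to override the local-VMFS and existing-DB checks.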
def config_rm(args):
"""
Remove Local Config DB or local link. We NEVER remove shared DB.
:return: None for success, string for error
"""
# This asks for double confirmation, and removes the local link or DB (if any)
# NEVER deletes the shared database - instead prints help
if not args.local and not args.unlink:
return err_out("""
    DB removal is an irreversible operation. Please use the '--local' flag to remove the DB in SingleNode mode,
    and use '--unlink' to unlink from the DB in MultiNode mode.
    Note that '--unlink' will not remove a shared DB, but simply configure the current ESXi host to stop using it.
    To remove a shared DB, run 'vmdkops_admin config rm --unlink' on all ESXi hosts using this DB, and then manually
    remove the actual DB file '{}' from shared storage.
""".format(auth_data.CONFIG_DB_NAME))
if args.local and args.unlink:
return err_out("""
    Cannot use '--local' and '--unlink' together. Please use the '--local' flag to remove the DB in SingleNode mode,
    and use '--unlink' to unlink from the DB in MultiNode mode.
"""
)
if not args.confirm:
return err_out("Warning: For extra safety, removal operation requires '--confirm' flag.")
# Check the existing config mode
with auth_data.AuthorizationDataManager() as auth:
try:
auth.connect()
info = auth.get_info()
mode = auth.mode # for usage outside of the 'with'
except auth_data.DbAccessError as ex:
# the DB is broken and is being asked to be removed, so let's oblige
printMessage(args.output_format, "Received error - removing comfiguration anyways. Err: \"{}\"".format(str(ex)))
try:
os.remove(auth_data.AUTH_DB_PATH)
except:
pass
return None
    # mode is NotConfigured and the DB path does not exist: nothing to remove
    if mode == auth_data.DBMode.NotConfigured:
        printMessage(args.output_format, "Nothing to do - Mode={}.".format(str(mode)))
        return None
link_path = auth_data.AUTH_DB_PATH # local DB or link
if not os.path.lexists(link_path):
return None
if mode == auth_data.DBMode.MultiNode:
if args.local:
return err_out("'rm --local' is not supported when " + DB_REF + "is in MultiNode mode."
" Use 'rm --unlink' to remove the local link to shared DB.")
else:
output_list = []
try:
os.remove(link_path)
output_list.append("Removed link {}".format(link_path))
except Exception as ex:
output_list.append("Failed to remove {}: {}".format(link_path, ex))
output_list.append("Updating {}".format(local_sh.LOCAL_SH_PATH))
printMessage(args.output_format, "\n".join(output_list))
local_sh.update_symlink_info(add=False)
return None
if mode == auth_data.DBMode.SingleNode:
if args.unlink:
return err_out("'rm --unlink' is not supported when " +
DB_REF + "is in SingleNode mode."
" Use 'rm --local' to remove local DB configuration.")
else:
if not args.no_backup:
printMessage(args.output_format, "Moved {} to backup file {}".format(link_path,
db_move_to_backup(link_path)))
return None
# All other cases
printMessage(args.output_format, "Nothing to do - Mode={}.".format(str(mode)))
def config_mv(args):
"""[Not Supported Yet]
Relocate config DB from its current location
:return: None for success, string for error
"""
if not args.force:
return err_out(DB_REF + " move to {} ".format(args.to) +
"requires '--force' flag to execute the request.")
    # TODO:
    # this is pure convenience code, so it is very low priority; still, here are the steps:
    # - check if the target exists upfront, and fail if it does
    # - copy the DB instance to 'to', and flip the symlink
    # - refresh the service (not really needed, as the next vmci_command handlers will pick it up)
    # - needs --dryrun or --confirm
    # issue: really works with discovery only, as other hosts need to find it out
printMessage(args.output_format, "Sorry, configuration move ('config mv' command) is not supported yet")
return None
def config_db_get_status():
    '''A helper to get config DB status. Returns an array of status info'''
result = []
with auth_data.AuthorizationDataManager() as auth:
try:
auth.connect()
except:
            pass  # connect() will set the status regardless of success
for (k, v) in auth.get_info().items():
result.append({k: v})
return result
def config_status(args):
"""A subset of 'config' command - prints the DB config only"""
output_list = []
for r in config_db_get_status():
output_list.append("{}: {}".format(list(r.keys())[0], list(r.values())[0]))
printMessage(args.output_format, "\n".join(output_list))
return None
# ==== Run it now ====
if __name__ == "__main__":
main()
| apache-2.0 |
dyim42/zerorpc-python | tests/test_pubpush.py | 102 | 3999 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gevent
import gevent.event
import zerorpc
from testutils import teardown, random_ipc_endpoint
def test_pushpull_inheritance():
endpoint = random_ipc_endpoint()
pusher = zerorpc.Pusher()
pusher.bind(endpoint)
trigger = gevent.event.Event()
class Puller(zerorpc.Puller):
def lolita(self, a, b):
print 'lolita', a, b
assert a + b == 3
trigger.set()
puller = Puller()
puller.connect(endpoint)
gevent.spawn(puller.run)
trigger.clear()
pusher.lolita(1, 2)
trigger.wait()
print 'done'
def test_pubsub_inheritance():
endpoint = random_ipc_endpoint()
publisher = zerorpc.Publisher()
publisher.bind(endpoint)
trigger = gevent.event.Event()
class Subscriber(zerorpc.Subscriber):
def lolita(self, a, b):
print 'lolita', a, b
assert a + b == 3
trigger.set()
subscriber = Subscriber()
subscriber.connect(endpoint)
gevent.spawn(subscriber.run)
trigger.clear()
    # We need this retry logic to wait until the subscriber.run coroutine starts
    # reading (the published messages will go to /dev/null until then).
for attempt in xrange(0, 10):
publisher.lolita(1, 2)
if trigger.wait(0.2):
print 'done'
return
raise RuntimeError("The subscriber didn't receive any published message")
def test_pushpull_composite():
endpoint = random_ipc_endpoint()
trigger = gevent.event.Event()
class Puller(object):
def lolita(self, a, b):
print 'lolita', a, b
assert a + b == 3
trigger.set()
pusher = zerorpc.Pusher()
pusher.bind(endpoint)
service = Puller()
puller = zerorpc.Puller(service)
puller.connect(endpoint)
gevent.spawn(puller.run)
trigger.clear()
pusher.lolita(1, 2)
trigger.wait()
print 'done'
def test_pubsub_composite():
endpoint = random_ipc_endpoint()
trigger = gevent.event.Event()
class Subscriber(object):
def lolita(self, a, b):
print 'lolita', a, b
assert a + b == 3
trigger.set()
publisher = zerorpc.Publisher()
publisher.bind(endpoint)
service = Subscriber()
subscriber = zerorpc.Subscriber(service)
subscriber.connect(endpoint)
gevent.spawn(subscriber.run)
trigger.clear()
    # We need this retry logic to wait until the subscriber.run coroutine starts
    # reading (the published messages will go to /dev/null until then).
for attempt in xrange(0, 10):
publisher.lolita(1, 2)
if trigger.wait(0.2):
print 'done'
return
raise RuntimeError("The subscriber didn't receive any published message")
| mit |
jusjusjus/pyedf | pyedf/score/score.py | 1 | 6055 | #! /usr/bin/python
from __future__ import with_statement
import logging
from .state import State
import numpy as np
import os
import re
class Score(object):
logger = logging.getLogger(name='Score')
    commentSymbol = '#'  # marks comment lines in the score file
    lineSeparator = ','  # separates fields within a line
states_dict = dict()
def __init__(self, filename=None, states=[], verbose=0):
self.logger.debug("__init__(filename={}, num_states={})".format(filename, states))
self.verbose = verbose
self.states = []
self.set_states(states)
self.filename = filename
if not self.filename is None:
if not os.path.exists(self.filename):
raise AttributeError("Score file %s does not exist." % (filename))
self.set_states(self.load(filename))
if self.verbose > 0: print("score: score file '%s' found." % (filename))
if self.verbose == 2: print("score: the states", self.states)
def set_states(self, states):
for state in states:
self.states.append(state)
def interpret_states(self):
pass
    def isComment(self, line):
        line = line.strip(' ')
        if not line or line[0] == self.commentSymbol:  # blank lines and lines starting with the commentSymbol ..
            return True                                # .. are comments; don't process them.
        else:
            return False                               # else: the caller splits the line at separators.
def load(self, filename):
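        """Load states from a score file.

        Each non-comment line is expected in one of these forms:
            form 1: start
            form 2: start, annotation
            form 3: start, duration, annotation
        """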
self.logger.debug("load(filename='{}')".format(filename))
states = []
with open(filename, 'r') as score_file:
for line in score_file:
try:
if self.isComment(line):
continue
line = line.strip('\n').strip('\r').strip(' ')
x = line.split(self.lineSeparator)
                    if len(x) > 0:  # form 1: at least a start time is given
start = x[0].strip(' ')
if len(x) == 1:
annot = ''
duration = ''
if len(x) == 2:
annot = x[1]
duration = ''
                        elif len(x) > 2:  # form 3: start, duration and annotation are given
duration = x[1].strip(' ')
annot = x[2]
if duration == '':
duration = '-1'
states.append( State(start=start, duration=duration, annot=annot) )
except Exception as e:
self.logger.debug("# line not readable: {}\n{}".format(line, e))
return states
def save(self, filename):
print("# opening", filename, "to write ...")
with open(filename, 'w') as score_file:
string = '# start, duration, annotation\n'+self.__str__()
score_file.write(string + '\n')
def append(self, new_state=None, start=None, duration=None, annot=None):
if new_state is None:
new_state = State(start=start, duration=duration, annot=annot)
self.states.append(new_state)
def __str__(self):
if hasattr(self, 'states'):
return '\n'.join([str(state) for state in self.states])
else:
return 'Score'
def select_by_function(self, function, **kwargs):
selection = []
for state in self.states:
if function(state, **kwargs):
selection.append(state)
score_select = object.__new__(type(self))
score_select.__init__(states=selection)
return score_select
def intersect(self, other_score):
intersection = []
for state in self.states:
section_j = state.intersect(other_score.states)
intersection.extend( section_j )
return type(self)(states=intersection)
def duration(self, annot=None):
duration = 0.0
        if annot is None:
duration = np.sum([state.duration for state in self.states])
else:
for state in self.states:
if state.annot == annot:
duration += state.duration
return duration
def count(self, annot=None):
        if annot is None:
count = len(self.states)
else:
count = 0
for state in self.states:
if state.annot == annot:
count += 1
return count
def connect_adjacent_states(self, close=0.01):
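        """Merge consecutive states that carry the same annotation and whose
        start lies within `close` seconds of the previous state's end."""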
if len(self.states) == 0: return
new_states = []
last_annot = self.states[0].annot
last_duration = self.states[0].duration
last_start = self.states[0] # will be interpreted as datetime.datetime
for state in self.states[1:]:
dt = np.abs((state-last_start.end).total_seconds())
if dt < close and last_annot == state.annot:
last_duration += (state.end-last_start.end).total_seconds()
else:
new_state = State(start=last_start, duration=last_duration, annot=last_annot)
new_states.append(new_state)
last_annot = state.annot
last_duration = state.duration
last_start = state # will be interpreted as datetime.datetime
new_state = State(start=last_start, duration=last_duration, annot=last_annot)
new_states.append(new_state)
self.logger.debug("Length of individual states: {} seconds.".format(sum(state.duration for state in self.states)))
self.logger.debug("Length of connected states: {} seconds.".format(sum(state.duration for state in new_states)))
self.set_states(new_states)
score = Score
if __name__ == "__main__":
score_filename = '../../example/sample.csv'
testscore = score(filename=score_filename)
print(testscore)
| gpl-3.0 |
Perferom/android_external_chromium_org | sync/tools/testserver/chromiumsync_test.py | 154 | 30090 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests exercising chromiumsync and SyncDataModel."""
import pickle
import unittest
import autofill_specifics_pb2
import bookmark_specifics_pb2
import chromiumsync
import managed_user_specifics_pb2
import sync_pb2
import theme_specifics_pb2
class SyncDataModelTest(unittest.TestCase):
def setUp(self):
self.model = chromiumsync.SyncDataModel()
# The Synced Bookmarks folder is not created by default
self._expect_synced_bookmarks_folder = False
def AddToModel(self, proto):
self.model._entries[proto.id_string] = proto
def GetChangesFromTimestamp(self, requested_types, timestamp):
message = sync_pb2.GetUpdatesMessage()
message.from_timestamp = timestamp
for data_type in requested_types:
getattr(message.requested_types,
chromiumsync.SYNC_TYPE_TO_DESCRIPTOR[
data_type].name).SetInParent()
return self.model.GetChanges(
chromiumsync.UpdateSieve(message, self.model.migration_history))
def FindMarkerByNumber(self, markers, datatype):
"""Search a list of progress markers and find the one for a datatype."""
for marker in markers:
if marker.data_type_id == datatype.number:
return marker
self.fail('Required marker not found: %s' % datatype.name)
def testPermanentItemSpecs(self):
specs = chromiumsync.SyncDataModel._PERMANENT_ITEM_SPECS
declared_specs = set(['0'])
for spec in specs:
self.assertTrue(spec.parent_tag in declared_specs, 'parent tags must '
'be declared before use')
declared_specs.add(spec.tag)
unique_datatypes = set([x.sync_type for x in specs])
self.assertEqual(unique_datatypes,
set(chromiumsync.ALL_TYPES[1:]),
'Every sync datatype should have a permanent folder '
'associated with it')
def testSaveEntry(self):
proto = sync_pb2.SyncEntity()
proto.id_string = 'abcd'
proto.version = 0
self.assertFalse(self.model._ItemExists(proto.id_string))
self.model._SaveEntry(proto)
self.assertEqual(1, proto.version)
self.assertTrue(self.model._ItemExists(proto.id_string))
self.model._SaveEntry(proto)
self.assertEqual(2, proto.version)
proto.version = 0
self.assertTrue(self.model._ItemExists(proto.id_string))
self.assertEqual(2, self.model._entries[proto.id_string].version)
def testCreatePermanentItems(self):
self.model._CreateDefaultPermanentItems(chromiumsync.ALL_TYPES)
self.assertEqual(len(chromiumsync.ALL_TYPES) + 1,
len(self.model._entries))
def ExpectedPermanentItemCount(self, sync_type):
if sync_type == chromiumsync.BOOKMARK:
if self._expect_synced_bookmarks_folder:
return 4
else:
return 3
else:
return 1
def testGetChangesFromTimestampZeroForEachType(self):
all_types = chromiumsync.ALL_TYPES[1:]
for sync_type in all_types:
self.model = chromiumsync.SyncDataModel()
request_types = [sync_type]
version, changes, remaining = (
self.GetChangesFromTimestamp(request_types, 0))
expected_count = self.ExpectedPermanentItemCount(sync_type)
self.assertEqual(expected_count, version)
self.assertEqual(expected_count, len(changes))
for change in changes:
self.assertTrue(change.HasField('server_defined_unique_tag'))
self.assertEqual(change.version, change.sync_timestamp)
self.assertTrue(change.version <= version)
# Test idempotence: another GetUpdates from ts=0 shouldn't recreate.
version, changes, remaining = (
self.GetChangesFromTimestamp(request_types, 0))
self.assertEqual(expected_count, version)
self.assertEqual(expected_count, len(changes))
self.assertEqual(0, remaining)
# Doing a wider GetUpdates from timestamp zero shouldn't recreate either.
new_version, changes, remaining = (
self.GetChangesFromTimestamp(all_types, 0))
if self._expect_synced_bookmarks_folder:
self.assertEqual(len(chromiumsync.SyncDataModel._PERMANENT_ITEM_SPECS),
new_version)
else:
self.assertEqual(
len(chromiumsync.SyncDataModel._PERMANENT_ITEM_SPECS) -1,
new_version)
self.assertEqual(new_version, len(changes))
self.assertEqual(0, remaining)
version, changes, remaining = (
self.GetChangesFromTimestamp(request_types, 0))
self.assertEqual(new_version, version)
self.assertEqual(expected_count, len(changes))
self.assertEqual(0, remaining)
def testBatchSize(self):
for sync_type in chromiumsync.ALL_TYPES[1:]:
specifics = chromiumsync.GetDefaultEntitySpecifics(sync_type)
self.model = chromiumsync.SyncDataModel()
request_types = [sync_type]
for i in range(self.model._BATCH_SIZE*3):
entry = sync_pb2.SyncEntity()
entry.id_string = 'batch test %d' % i
entry.specifics.CopyFrom(specifics)
self.model._SaveEntry(entry)
last_bit = self.ExpectedPermanentItemCount(sync_type)
version, changes, changes_remaining = (
self.GetChangesFromTimestamp(request_types, 0))
self.assertEqual(self.model._BATCH_SIZE, version)
self.assertEqual(self.model._BATCH_SIZE*2 + last_bit, changes_remaining)
version, changes, changes_remaining = (
self.GetChangesFromTimestamp(request_types, version))
self.assertEqual(self.model._BATCH_SIZE*2, version)
self.assertEqual(self.model._BATCH_SIZE + last_bit, changes_remaining)
version, changes, changes_remaining = (
self.GetChangesFromTimestamp(request_types, version))
self.assertEqual(self.model._BATCH_SIZE*3, version)
self.assertEqual(last_bit, changes_remaining)
version, changes, changes_remaining = (
self.GetChangesFromTimestamp(request_types, version))
self.assertEqual(self.model._BATCH_SIZE*3 + last_bit, version)
self.assertEqual(0, changes_remaining)
# Now delete a third of the items.
for i in xrange(self.model._BATCH_SIZE*3 - 1, 0, -3):
entry = sync_pb2.SyncEntity()
entry.id_string = 'batch test %d' % i
entry.deleted = True
self.model._SaveEntry(entry)
# The batch counts shouldn't change.
version, changes, changes_remaining = (
self.GetChangesFromTimestamp(request_types, 0))
self.assertEqual(self.model._BATCH_SIZE, len(changes))
self.assertEqual(self.model._BATCH_SIZE*2 + last_bit, changes_remaining)
version, changes, changes_remaining = (
self.GetChangesFromTimestamp(request_types, version))
self.assertEqual(self.model._BATCH_SIZE, len(changes))
self.assertEqual(self.model._BATCH_SIZE + last_bit, changes_remaining)
version, changes, changes_remaining = (
self.GetChangesFromTimestamp(request_types, version))
self.assertEqual(self.model._BATCH_SIZE, len(changes))
self.assertEqual(last_bit, changes_remaining)
version, changes, changes_remaining = (
self.GetChangesFromTimestamp(request_types, version))
self.assertEqual(last_bit, len(changes))
self.assertEqual(self.model._BATCH_SIZE*4 + last_bit, version)
self.assertEqual(0, changes_remaining)
def testCommitEachDataType(self):
for sync_type in chromiumsync.ALL_TYPES[1:]:
specifics = chromiumsync.GetDefaultEntitySpecifics(sync_type)
self.model = chromiumsync.SyncDataModel()
my_cache_guid = '112358132134'
parent = 'foobar'
commit_session = {}
# Start with a GetUpdates from timestamp 0, to populate permanent items.
original_version, original_changes, changes_remaining = (
self.GetChangesFromTimestamp([sync_type], 0))
def DoCommit(original=None, id_string='', name=None, parent=None,
position=0):
proto = sync_pb2.SyncEntity()
if original is not None:
proto.version = original.version
proto.id_string = original.id_string
proto.parent_id_string = original.parent_id_string
proto.name = original.name
else:
proto.id_string = id_string
proto.version = 0
proto.specifics.CopyFrom(specifics)
if name is not None:
proto.name = name
if parent:
proto.parent_id_string = parent.id_string
proto.insert_after_item_id = 'please discard'
proto.position_in_parent = position
proto.folder = True
proto.deleted = False
result = self.model.CommitEntry(proto, my_cache_guid, commit_session)
self.assertTrue(result)
return (proto, result)
# Commit a new item.
proto1, result1 = DoCommit(name='namae', id_string='Foo',
parent=original_changes[-1], position=100)
# Commit an item whose parent is another item (referenced via the
# pre-commit ID).
proto2, result2 = DoCommit(name='Secondo', id_string='Bar',
parent=proto1, position=-100)
# Commit a sibling of the second item.
proto3, result3 = DoCommit(name='Third!', id_string='Baz',
parent=proto1, position=-50)
self.assertEqual(3, len(commit_session))
for p, r in [(proto1, result1), (proto2, result2), (proto3, result3)]:
self.assertNotEqual(r.id_string, p.id_string)
self.assertEqual(r.originator_client_item_id, p.id_string)
self.assertEqual(r.originator_cache_guid, my_cache_guid)
self.assertTrue(r is not self.model._entries[r.id_string],
"Commit result didn't make a defensive copy.")
self.assertTrue(p is not self.model._entries[r.id_string],
"Commit result didn't make a defensive copy.")
self.assertEqual(commit_session.get(p.id_string), r.id_string)
self.assertTrue(r.version > original_version)
self.assertEqual(result1.parent_id_string, proto1.parent_id_string)
self.assertEqual(result2.parent_id_string, result1.id_string)
version, changes, remaining = (
self.GetChangesFromTimestamp([sync_type], original_version))
self.assertEqual(3, len(changes))
self.assertEqual(0, remaining)
self.assertEqual(original_version + 3, version)
self.assertEqual([result1, result2, result3], changes)
for c in changes:
self.assertTrue(c is not self.model._entries[c.id_string],
"GetChanges didn't make a defensive copy.")
self.assertTrue(result2.position_in_parent < result3.position_in_parent)
self.assertEqual(-100, result2.position_in_parent)
# Now update the items so that the second item is the parent of the
# first; with the first sandwiched between two new items (4 and 5).
# Do this in a new commit session, meaning we'll reference items from
# the first batch by their post-commit, server IDs.
commit_session = {}
old_cache_guid = my_cache_guid
my_cache_guid = 'A different GUID'
proto2b, result2b = DoCommit(original=result2,
parent=original_changes[-1])
proto4, result4 = DoCommit(id_string='ID4', name='Four',
parent=result2, position=-200)
proto1b, result1b = DoCommit(original=result1,
parent=result2, position=-150)
proto5, result5 = DoCommit(id_string='ID5', name='Five', parent=result2,
position=150)
self.assertEqual(2, len(commit_session), 'Only new items in second '
'batch should be in the session')
for p, r, original in [(proto2b, result2b, proto2),
(proto4, result4, proto4),
(proto1b, result1b, proto1),
(proto5, result5, proto5)]:
self.assertEqual(r.originator_client_item_id, original.id_string)
if original is not p:
self.assertEqual(r.id_string, p.id_string,
'Ids should be stable after first commit')
self.assertEqual(r.originator_cache_guid, old_cache_guid)
else:
self.assertNotEqual(r.id_string, p.id_string)
self.assertEqual(r.originator_cache_guid, my_cache_guid)
self.assertEqual(commit_session.get(p.id_string), r.id_string)
self.assertTrue(r is not self.model._entries[r.id_string],
"Commit result didn't make a defensive copy.")
self.assertTrue(p is not self.model._entries[r.id_string],
"Commit didn't make a defensive copy.")
self.assertTrue(r.version > p.version)
version, changes, remaining = (
self.GetChangesFromTimestamp([sync_type], original_version))
self.assertEqual(5, len(changes))
self.assertEqual(0, remaining)
self.assertEqual(original_version + 7, version)
self.assertEqual([result3, result2b, result4, result1b, result5], changes)
for c in changes:
self.assertTrue(c is not self.model._entries[c.id_string],
"GetChanges didn't make a defensive copy.")
self.assertTrue(result4.parent_id_string ==
result1b.parent_id_string ==
result5.parent_id_string ==
result2b.id_string)
self.assertTrue(result4.position_in_parent <
result1b.position_in_parent <
result5.position_in_parent)
def testUpdateSieve(self):
# from_timestamp, legacy mode
autofill = chromiumsync.SYNC_TYPE_FIELDS['autofill']
theme = chromiumsync.SYNC_TYPE_FIELDS['theme']
msg = sync_pb2.GetUpdatesMessage()
msg.from_timestamp = 15412
msg.requested_types.autofill.SetInParent()
msg.requested_types.theme.SetInParent()
sieve = chromiumsync.UpdateSieve(msg)
self.assertEqual(sieve._state,
{chromiumsync.TOP_LEVEL: 15412,
chromiumsync.AUTOFILL: 15412,
chromiumsync.THEME: 15412})
response = sync_pb2.GetUpdatesResponse()
sieve.SaveProgress(15412, response)
self.assertEqual(0, len(response.new_progress_marker))
self.assertFalse(response.HasField('new_timestamp'))
response = sync_pb2.GetUpdatesResponse()
sieve.SaveProgress(15413, response)
self.assertEqual(0, len(response.new_progress_marker))
self.assertTrue(response.HasField('new_timestamp'))
self.assertEqual(15413, response.new_timestamp)
# Existing tokens
msg = sync_pb2.GetUpdatesMessage()
marker = msg.from_progress_marker.add()
marker.data_type_id = autofill.number
marker.token = pickle.dumps((15412, 1))
marker = msg.from_progress_marker.add()
marker.data_type_id = theme.number
marker.token = pickle.dumps((15413, 1))
sieve = chromiumsync.UpdateSieve(msg)
self.assertEqual(sieve._state,
{chromiumsync.TOP_LEVEL: 15412,
chromiumsync.AUTOFILL: 15412,
chromiumsync.THEME: 15413})
response = sync_pb2.GetUpdatesResponse()
sieve.SaveProgress(15413, response)
self.assertEqual(1, len(response.new_progress_marker))
self.assertFalse(response.HasField('new_timestamp'))
marker = response.new_progress_marker[0]
self.assertEqual(marker.data_type_id, autofill.number)
self.assertEqual(pickle.loads(marker.token), (15413, 1))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
# Empty tokens indicating from timestamp = 0
msg = sync_pb2.GetUpdatesMessage()
marker = msg.from_progress_marker.add()
marker.data_type_id = autofill.number
marker.token = pickle.dumps((412, 1))
marker = msg.from_progress_marker.add()
marker.data_type_id = theme.number
marker.token = ''
sieve = chromiumsync.UpdateSieve(msg)
self.assertEqual(sieve._state,
{chromiumsync.TOP_LEVEL: 0,
chromiumsync.AUTOFILL: 412,
chromiumsync.THEME: 0})
response = sync_pb2.GetUpdatesResponse()
sieve.SaveProgress(1, response)
self.assertEqual(1, len(response.new_progress_marker))
self.assertFalse(response.HasField('new_timestamp'))
marker = response.new_progress_marker[0]
self.assertEqual(marker.data_type_id, theme.number)
self.assertEqual(pickle.loads(marker.token), (1, 1))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
response = sync_pb2.GetUpdatesResponse()
sieve.SaveProgress(412, response)
self.assertEqual(1, len(response.new_progress_marker))
self.assertFalse(response.HasField('new_timestamp'))
marker = response.new_progress_marker[0]
self.assertEqual(marker.data_type_id, theme.number)
self.assertEqual(pickle.loads(marker.token), (412, 1))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
response = sync_pb2.GetUpdatesResponse()
sieve.SaveProgress(413, response)
self.assertEqual(2, len(response.new_progress_marker))
self.assertFalse(response.HasField('new_timestamp'))
marker = self.FindMarkerByNumber(response.new_progress_marker, theme)
self.assertEqual(pickle.loads(marker.token), (413, 1))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
marker = self.FindMarkerByNumber(response.new_progress_marker, autofill)
self.assertEqual(pickle.loads(marker.token), (413, 1))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
# Migration token timestamps (client gives timestamp, server returns token)
# These are for migrating from the old 'timestamp' protocol to the
# progressmarker protocol, and have nothing to do with the MIGRATION_DONE
# error code.
msg = sync_pb2.GetUpdatesMessage()
marker = msg.from_progress_marker.add()
marker.data_type_id = autofill.number
marker.timestamp_token_for_migration = 15213
marker = msg.from_progress_marker.add()
marker.data_type_id = theme.number
marker.timestamp_token_for_migration = 15211
sieve = chromiumsync.UpdateSieve(msg)
self.assertEqual(sieve._state,
{chromiumsync.TOP_LEVEL: 15211,
chromiumsync.AUTOFILL: 15213,
chromiumsync.THEME: 15211})
response = sync_pb2.GetUpdatesResponse()
sieve.SaveProgress(16000, response) # There were updates
self.assertEqual(2, len(response.new_progress_marker))
self.assertFalse(response.HasField('new_timestamp'))
marker = self.FindMarkerByNumber(response.new_progress_marker, theme)
self.assertEqual(pickle.loads(marker.token), (16000, 1))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
marker = self.FindMarkerByNumber(response.new_progress_marker, autofill)
self.assertEqual(pickle.loads(marker.token), (16000, 1))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
msg = sync_pb2.GetUpdatesMessage()
marker = msg.from_progress_marker.add()
marker.data_type_id = autofill.number
marker.timestamp_token_for_migration = 3000
marker = msg.from_progress_marker.add()
marker.data_type_id = theme.number
marker.timestamp_token_for_migration = 3000
sieve = chromiumsync.UpdateSieve(msg)
self.assertEqual(sieve._state,
{chromiumsync.TOP_LEVEL: 3000,
chromiumsync.AUTOFILL: 3000,
chromiumsync.THEME: 3000})
response = sync_pb2.GetUpdatesResponse()
sieve.SaveProgress(3000, response) # Already up to date
self.assertEqual(2, len(response.new_progress_marker))
self.assertFalse(response.HasField('new_timestamp'))
marker = self.FindMarkerByNumber(response.new_progress_marker, theme)
self.assertEqual(pickle.loads(marker.token), (3000, 1))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
marker = self.FindMarkerByNumber(response.new_progress_marker, autofill)
self.assertEqual(pickle.loads(marker.token), (3000, 1))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
def testCheckRaiseTransientError(self):
testserver = chromiumsync.TestServer()
    http_code, raw_response = testserver.HandleSetTransientError()
self.assertEqual(http_code, 200)
try:
testserver.CheckTransientError()
self.fail('Should have raised transient error exception')
except chromiumsync.TransientError:
self.assertTrue(testserver.transient_error)
def testUpdateSieveStoreMigration(self):
autofill = chromiumsync.SYNC_TYPE_FIELDS['autofill']
theme = chromiumsync.SYNC_TYPE_FIELDS['theme']
migrator = chromiumsync.MigrationHistory()
msg = sync_pb2.GetUpdatesMessage()
marker = msg.from_progress_marker.add()
marker.data_type_id = autofill.number
marker.token = pickle.dumps((15412, 1))
marker = msg.from_progress_marker.add()
marker.data_type_id = theme.number
marker.token = pickle.dumps((15413, 1))
sieve = chromiumsync.UpdateSieve(msg, migrator)
sieve.CheckMigrationState()
migrator.Bump([chromiumsync.BOOKMARK, chromiumsync.PASSWORD]) # v=2
sieve = chromiumsync.UpdateSieve(msg, migrator)
sieve.CheckMigrationState()
self.assertEqual(sieve._state,
{chromiumsync.TOP_LEVEL: 15412,
chromiumsync.AUTOFILL: 15412,
chromiumsync.THEME: 15413})
migrator.Bump([chromiumsync.AUTOFILL, chromiumsync.PASSWORD]) # v=3
sieve = chromiumsync.UpdateSieve(msg, migrator)
try:
sieve.CheckMigrationState()
self.fail('Should have raised.')
except chromiumsync.MigrationDoneError, error:
# We want this to happen.
self.assertEqual([chromiumsync.AUTOFILL], error.datatypes)
msg = sync_pb2.GetUpdatesMessage()
marker = msg.from_progress_marker.add()
marker.data_type_id = autofill.number
marker.token = ''
marker = msg.from_progress_marker.add()
marker.data_type_id = theme.number
marker.token = pickle.dumps((15413, 1))
sieve = chromiumsync.UpdateSieve(msg, migrator)
sieve.CheckMigrationState()
response = sync_pb2.GetUpdatesResponse()
sieve.SaveProgress(15412, response) # There were updates
self.assertEqual(1, len(response.new_progress_marker))
self.assertFalse(response.HasField('new_timestamp'))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
marker = self.FindMarkerByNumber(response.new_progress_marker, autofill)
self.assertEqual(pickle.loads(marker.token), (15412, 3))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
msg = sync_pb2.GetUpdatesMessage()
marker = msg.from_progress_marker.add()
marker.data_type_id = autofill.number
marker.token = pickle.dumps((15412, 3))
marker = msg.from_progress_marker.add()
marker.data_type_id = theme.number
marker.token = pickle.dumps((15413, 1))
sieve = chromiumsync.UpdateSieve(msg, migrator)
sieve.CheckMigrationState()
migrator.Bump([chromiumsync.THEME, chromiumsync.AUTOFILL]) # v=4
migrator.Bump([chromiumsync.AUTOFILL]) # v=5
sieve = chromiumsync.UpdateSieve(msg, migrator)
try:
sieve.CheckMigrationState()
self.fail("Should have raised.")
except chromiumsync.MigrationDoneError, error:
# We want this to happen.
self.assertEqual(set([chromiumsync.THEME, chromiumsync.AUTOFILL]),
set(error.datatypes))
msg = sync_pb2.GetUpdatesMessage()
marker = msg.from_progress_marker.add()
marker.data_type_id = autofill.number
marker.token = ''
marker = msg.from_progress_marker.add()
marker.data_type_id = theme.number
marker.token = pickle.dumps((15413, 1))
sieve = chromiumsync.UpdateSieve(msg, migrator)
try:
sieve.CheckMigrationState()
self.fail("Should have raised.")
except chromiumsync.MigrationDoneError, error:
# We want this to happen.
self.assertEqual([chromiumsync.THEME], error.datatypes)
msg = sync_pb2.GetUpdatesMessage()
marker = msg.from_progress_marker.add()
marker.data_type_id = autofill.number
marker.token = ''
marker = msg.from_progress_marker.add()
marker.data_type_id = theme.number
marker.token = ''
sieve = chromiumsync.UpdateSieve(msg, migrator)
sieve.CheckMigrationState()
response = sync_pb2.GetUpdatesResponse()
sieve.SaveProgress(15412, response) # There were updates
self.assertEqual(2, len(response.new_progress_marker))
self.assertFalse(response.HasField('new_timestamp'))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
marker = self.FindMarkerByNumber(response.new_progress_marker, autofill)
self.assertEqual(pickle.loads(marker.token), (15412, 5))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
marker = self.FindMarkerByNumber(response.new_progress_marker, theme)
self.assertEqual(pickle.loads(marker.token), (15412, 4))
self.assertFalse(marker.HasField('timestamp_token_for_migration'))
msg = sync_pb2.GetUpdatesMessage()
marker = msg.from_progress_marker.add()
marker.data_type_id = autofill.number
marker.token = pickle.dumps((15412, 5))
marker = msg.from_progress_marker.add()
marker.data_type_id = theme.number
marker.token = pickle.dumps((15413, 4))
sieve = chromiumsync.UpdateSieve(msg, migrator)
sieve.CheckMigrationState()
def testCreateSyncedBookmarks(self):
version1, changes, remaining = (
self.GetChangesFromTimestamp([chromiumsync.BOOKMARK], 0))
id_string = self.model._MakeCurrentId(chromiumsync.BOOKMARK,
'<server tag>synced_bookmarks')
self.assertFalse(self.model._ItemExists(id_string))
self._expect_synced_bookmarks_folder = True
self.model.TriggerCreateSyncedBookmarks()
self.assertTrue(self.model._ItemExists(id_string))
# Check that the version changed when the folder was created and the only
# change was the folder creation.
version2, changes, remaining = (
self.GetChangesFromTimestamp([chromiumsync.BOOKMARK], version1))
self.assertEqual(len(changes), 1)
self.assertEqual(changes[0].id_string, id_string)
self.assertNotEqual(version1, version2)
self.assertEqual(
self.ExpectedPermanentItemCount(chromiumsync.BOOKMARK),
version2)
# Ensure getting from timestamp 0 includes the folder.
version, changes, remaining = (
self.GetChangesFromTimestamp([chromiumsync.BOOKMARK], 0))
self.assertEqual(
self.ExpectedPermanentItemCount(chromiumsync.BOOKMARK),
len(changes))
self.assertEqual(version2, version)
def testAcknowledgeManagedUser(self):
# Create permanent items.
self.GetChangesFromTimestamp([chromiumsync.MANAGED_USER], 0)
proto = sync_pb2.SyncEntity()
proto.id_string = 'abcd'
proto.version = 0
# Make sure the managed_user field exists.
proto.specifics.managed_user.acknowledged = False
self.assertTrue(proto.specifics.HasField('managed_user'))
self.AddToModel(proto)
version1, changes1, remaining1 = (
self.GetChangesFromTimestamp([chromiumsync.MANAGED_USER], 0))
for change in changes1:
self.assertTrue(not change.specifics.managed_user.acknowledged)
# Turn on managed user acknowledgement
self.model.acknowledge_managed_users = True
version2, changes2, remaining2 = (
self.GetChangesFromTimestamp([chromiumsync.MANAGED_USER], 0))
for change in changes2:
self.assertTrue(change.specifics.managed_user.acknowledged)
def testGetKey(self):
[key1] = self.model.GetKeystoreKeys()
[key2] = self.model.GetKeystoreKeys()
self.assertTrue(len(key1))
self.assertEqual(key1, key2)
# Trigger the rotation. A subsequent GetUpdates should return the nigori
# node (whose timestamp was bumped by the rotation).
version1, changes, remaining = (
self.GetChangesFromTimestamp([chromiumsync.NIGORI], 0))
self.model.TriggerRotateKeystoreKeys()
version2, changes, remaining = (
self.GetChangesFromTimestamp([chromiumsync.NIGORI], version1))
self.assertNotEqual(version1, version2)
self.assertEquals(len(changes), 1)
self.assertEquals(changes[0].name, "Nigori")
# The current keys should contain the old keys, with the new key appended.
[key1, key3] = self.model.GetKeystoreKeys()
self.assertEquals(key1, key2)
self.assertNotEqual(key1, key3)
self.assertTrue(len(key3) > 0)
def testTriggerEnableKeystoreEncryption(self):
version1, changes, remaining = (
self.GetChangesFromTimestamp([chromiumsync.EXPERIMENTS], 0))
keystore_encryption_id_string = (
self.model._ClientTagToId(
chromiumsync.EXPERIMENTS,
chromiumsync.KEYSTORE_ENCRYPTION_EXPERIMENT_TAG))
self.assertFalse(self.model._ItemExists(keystore_encryption_id_string))
self.model.TriggerEnableKeystoreEncryption()
self.assertTrue(self.model._ItemExists(keystore_encryption_id_string))
# The creation of the experiment should be downloaded on the next
# GetUpdates.
version2, changes, remaining = (
self.GetChangesFromTimestamp([chromiumsync.EXPERIMENTS], version1))
self.assertEqual(len(changes), 1)
self.assertEqual(changes[0].id_string, keystore_encryption_id_string)
self.assertNotEqual(version1, version2)
# Verify the experiment was created properly and is enabled.
self.assertEqual(chromiumsync.KEYSTORE_ENCRYPTION_EXPERIMENT_TAG,
changes[0].client_defined_unique_tag)
self.assertTrue(changes[0].HasField("specifics"))
self.assertTrue(changes[0].specifics.HasField("experiments"))
self.assertTrue(
changes[0].specifics.experiments.HasField("keystore_encryption"))
self.assertTrue(
changes[0].specifics.experiments.keystore_encryption.enabled)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
Omegaphora/external_chromium_org | third_party/protobuf/python/google/protobuf/descriptor_database.py | 230 | 4411 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a container for DescriptorProtos."""
__author__ = '[email protected] (Matt Toia)'
class DescriptorDatabase(object):
"""A container accepting FileDescriptorProtos and maps DescriptorProtos."""
def __init__(self):
self._file_desc_protos_by_file = {}
self._file_desc_protos_by_symbol = {}
def Add(self, file_desc_proto):
"""Adds the FileDescriptorProto and its types to this database.
Args:
file_desc_proto: The FileDescriptorProto to add.
"""
self._file_desc_protos_by_file[file_desc_proto.name] = file_desc_proto
package = file_desc_proto.package
for message in file_desc_proto.message_type:
self._file_desc_protos_by_symbol.update(
(name, file_desc_proto) for name in _ExtractSymbols(message, package))
for enum in file_desc_proto.enum_type:
self._file_desc_protos_by_symbol[
'.'.join((package, enum.name))] = file_desc_proto
def FindFileByName(self, name):
"""Finds the file descriptor proto by file name.
    Typically the file name is a relative path ending in a .proto file. The
    proto with the given name must have been added to this database using the
    Add method, or else an error will be raised.
Args:
name: The file name to find.
Returns:
The file descriptor proto matching the name.
Raises:
KeyError if no file by the given name was added.
"""
return self._file_desc_protos_by_file[name]
def FindFileContainingSymbol(self, symbol):
"""Finds the file descriptor proto containing the specified symbol.
The symbol should be a fully qualified name including the file descriptor's
package and any containing messages. Some examples:
'some.package.name.Message'
'some.package.name.Message.NestedEnum'
The file descriptor proto containing the specified symbol must be added to
this database using the Add method or else an error will be raised.
Args:
symbol: The fully qualified symbol name.
Returns:
The file descriptor proto containing the symbol.
Raises:
KeyError if no file contains the specified symbol.
"""
return self._file_desc_protos_by_symbol[symbol]
def _ExtractSymbols(desc_proto, package):
"""Pulls out all the symbols from a descriptor proto.
Args:
desc_proto: The proto to extract symbols from.
package: The package containing the descriptor type.
Yields:
The fully qualified name found in the descriptor.
"""
message_name = '.'.join((package, desc_proto.name))
yield message_name
for nested_type in desc_proto.nested_type:
for symbol in _ExtractSymbols(nested_type, message_name):
yield symbol
for enum_type in desc_proto.enum_type:
yield '.'.join((message_name, enum_type.name))
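if __name__ == '__main__':
  # Illustrative usage sketch (not part of the original module); the proto
  # contents below are made up, only the DescriptorDatabase calls matter.
  from google.protobuf import descriptor_pb2

  file_proto = descriptor_pb2.FileDescriptorProto()
  file_proto.name = 'example/greeting.proto'
  file_proto.package = 'example'
  file_proto.message_type.add().name = 'Greeting'

  db = DescriptorDatabase()
  db.Add(file_proto)
  assert db.FindFileByName('example/greeting.proto') is file_proto
  assert db.FindFileContainingSymbol('example.Greeting') is file_proto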
| bsd-3-clause |
arongdari/sparse-graph-prior | sgp/GraphUtil.py | 1 | 1649 | import numpy as np
import networkx as nx
from collections import defaultdict
from scipy.sparse import csr_matrix, csc_matrix, triu
def sparse_to_networkx(G):
nnz = G.nonzero()
_G = nx.Graph()
_G.add_edges_from(zip(nnz[0], nnz[1]))
return _G
def compute_growth_rate(G, n_repeat=10):
"""
Compute the growth rate of graph G
:param G: sparse matrix (csc_matrix or csr_matrix)
:param n_repeat: int
:return:
"""
n_n = G.shape[0]
nnz = G.nonzero()
n_link = defaultdict(list)
for si in range(n_repeat):
rnd_nodes = np.arange(n_n, dtype=int)
np.random.shuffle(rnd_nodes)
node_dic = {i: n for i, n in enumerate(rnd_nodes)}
row_idx = list(map(lambda x: node_dic[x], nnz[0]))
col_idx = list(map(lambda x: node_dic[x], nnz[1]))
rnd_row = csr_matrix((G.data, (row_idx, col_idx)), shape=G.shape)
rnd_col = csc_matrix((G.data, (row_idx, col_idx)), shape=G.shape)
n_link[0].append(0)
for i in range(1, n_n):
            # count edges between node i and the previously inserted nodes
            cnt = 0
cnt += rnd_row.getrow(i)[:, :i].nnz
cnt += rnd_col.getcol(i)[:i - 1, :].nnz
n_link[i].append(cnt + n_link[i - 1][-1])
return np.array([np.mean(n_link[x]) for x in range(n_n)])
def degree_distribution(G):
d = defaultdict(int)
# degree = triu(G).sum(0)
    degree = (G.sum(0) + G.sum(1).T) / 2.0  # row sums + column sums count each edge twice
max_d = degree.max()
for _d in degree.tolist()[0]:
d[int(_d)] += 1
return d, [d[i] for i in range(int(max_d))]
def degree_one_nodes(G):
return np.sum(G.sum(0) / 2 == 1)
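if __name__ == "__main__":
    # Illustrative demo (not part of the original module): exercise the helpers
    # on a small random symmetric graph; size and density are arbitrary.
    from scipy.sparse import random as sparse_random

    A = sparse_random(50, 50, density=0.05, format='csr', random_state=0)
    A = ((A + A.T) > 0).astype(float)  # symmetrize and binarize
    print(compute_growth_rate(A, n_repeat=2)[-5:])  # cumulative link counts
    print(degree_distribution(A)[0])                # degree -> node count
    print(degree_one_nodes(A))                      # number of degree-1 nodes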
| mit |
bjodah/aqchem | chempy/tests/test_reactionsystem.py | 1 | 13409 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from itertools import chain
import pytest
from ..util.testing import requires
from ..util.parsing import parsing_library
from ..units import default_units, units_library, allclose
from ..chemistry import Substance, Reaction
from ..reactionsystem import ReactionSystem
@requires(parsing_library, 'numpy')
def test_ReactionSystem():
import numpy as np
kw = dict(substance_factory=Substance.from_formula)
r1 = Reaction.from_string('H2O -> H+ + OH-', 'H2O H+ OH-', name='r1')
rs = ReactionSystem([r1], 'H2O H+ OH-', **kw)
r2 = Reaction.from_string('H2O -> 2 H+ + OH-', 'H2O H+ OH-', name='r2')
with pytest.raises(ValueError):
ReactionSystem([r2], 'H2O H+ OH-', **kw)
with pytest.raises(ValueError):
ReactionSystem([r1, r1], 'H2O H+ OH-', **kw)
assert rs.as_substance_index('H2O') == 0
assert rs.as_substance_index(0) == 0
varied, varied_keys = rs.per_substance_varied({'H2O': 55.4, 'H+': 1e-7, 'OH-': 1e-7},
{'H+': [1e-8, 1e-9, 1e-10, 1e-11], 'OH-': [1e-3, 1e-2]})
assert varied_keys == ('H+', 'OH-')
assert len(varied.shape) == 3
assert varied.shape[:-1] == (4, 2)
assert varied.shape[-1] == 3
assert np.all(varied[..., 0] == 55.4)
assert np.all(varied[:, 1, 2] == 1e-2)
assert rs['r1'] is r1
rs.rxns.append(r2)
assert rs['r2'] is r2
with pytest.raises(KeyError):
rs['r3']
rs.rxns.append(Reaction({}, {}, 0, name='r2', checks=()))
with pytest.raises(ValueError):
rs['r2']
empty_rs = ReactionSystem([])
rs2 = empty_rs + rs
assert rs2 == rs
rs3 = rs + empty_rs
assert rs3 == rs
@requires(parsing_library)
def test_ReactionSystem__missing_substances_from_keys():
r1 = Reaction({'H2O'}, {'H+', 'OH-'})
with pytest.raises(ValueError):
ReactionSystem([r1], substances={'H2O': Substance.from_formula('H2O')})
kw = dict(missing_substances_from_keys=True, substance_factory=Substance.from_formula)
rs = ReactionSystem([r1], substances={'H2O': Substance.from_formula('H2O')}, **kw)
assert rs.substances['OH-'].composition == {0: -1, 1: 1, 8: 1}
@requires(parsing_library)
def test_ReactionSystem__check_balance():
rs1 = ReactionSystem.from_string('\n'.join(['2 NH3 -> N2 + 3 H2', 'N2H4 -> N2 + 2 H2']))
assert rs1.check_balance(strict=True)
rs2 = ReactionSystem.from_string('\n'.join(['2 A -> B', 'B -> 2A']),
substance_factory=Substance)
assert not rs2.check_balance(strict=True)
assert rs2.composition_balance_vectors() == ([], [])
def test_ReactionSystem__per_reaction_effect_on_substance():
rs = ReactionSystem([Reaction({'H2': 2, 'O2': 1}, {'H2O': 2})])
assert rs.per_reaction_effect_on_substance('H2') == {0: -2}
assert rs.per_reaction_effect_on_substance('O2') == {0: -1}
assert rs.per_reaction_effect_on_substance('H2O') == {0: 2}
def test_ReactionSystem__rates():
rs = ReactionSystem([Reaction({'H2O'}, {'H+', 'OH-'}, 11)])
assert rs.rates({'H2O': 3, 'H+': 5, 'OH-': 7}) == {'H2O': -11*3, 'H+': 11*3, 'OH-': 11*3}
def test_ReactionSystem__rates__cstr():
k = 11
rs = ReactionSystem([Reaction({'H2O2': 2}, {'O2': 1, 'H2O': 2}, k)])
c0 = {'H2O2': 3, 'O2': 5, 'H2O': 53}
fr = 7
fc = {'H2O2': 13, 'O2': 17, 'H2O': 23}
r = k*c0['H2O2']**2
ref = {
'H2O2': -2*r + fr*fc['H2O2'] - fr*c0['H2O2'],
'O2': r + fr*fc['O2'] - fr*c0['O2'],
'H2O': 2*r + fr*fc['H2O'] - fr*c0['H2O']
}
variables = dict(chain(c0.items(), [('fc_'+key, val) for key, val in fc.items()], [('fr', fr)]))
assert rs.rates(variables, cstr_fr_fc=('fr', {sk: 'fc_'+sk for sk in rs.substances})) == ref
@requires('numpy')
def test_ReactionSystem__html_tables():
r1 = Reaction({'A': 2}, {'A'}, name='R1')
r2 = Reaction({'A'}, {'A': 2}, name='R2')
rs = ReactionSystem([r1, r2])
ut, unc = rs.unimolecular_html_table()
assert unc == {0}
from chempy.printing import html
assert html(ut, with_name=False) == u'<table><tr><td>A</td><td ><a title="1: A → 2 A">R2</a></td></tr></table>'
bt, bnc = rs.bimolecular_html_table()
assert bnc == {1}
assert html(bt, with_name=False) == (
u'<table><th></th><th>A</th>\n<tr><td>A</td><td ><a title="0: 2 A → A">R1</a></td></tr></table>')
@requires(parsing_library, 'numpy')
def test_ReactionSystem__substance_factory():
r1 = Reaction.from_string('H2O -> H+ + OH-', 'H2O H+ OH-')
rs = ReactionSystem([r1], 'H2O H+ OH-',
substance_factory=Substance.from_formula)
assert rs.net_stoichs(['H2O']) == [-1]
assert rs.net_stoichs(['H+']) == [1]
assert rs.net_stoichs(['OH-']) == [1]
assert rs.substances['H2O'].composition[8] == 1
assert rs.substances['OH-'].composition[0] == -1
assert rs.substances['H+'].charge == 1
@requires(units_library)
def test_ReactionSystem__as_per_substance_array_dict():
mol = default_units.mol
m = default_units.metre
M = default_units.molar
rs = ReactionSystem([], [Substance('H2O')])
c = rs.as_per_substance_array({'H2O': 1*M}, unit=M)
assert c.dimensionality == M.dimensionality
assert abs(c[0]/(1000*mol/m**3) - 1) < 1e-16
c = rs.as_per_substance_array({'H2O': 1})
with pytest.raises(KeyError):
c = rs.as_per_substance_array({'H': 1})
assert rs.as_per_substance_dict([42]) == {'H2O': 42}
@requires(parsing_library)
def test_ReactionSystem__add():
rs1 = ReactionSystem.from_string('\n'.join(['2 H2O2 -> O2 + 2 H2O', 'H2 + O2 -> H2O2']))
rs2 = ReactionSystem.from_string('\n'.join(['2 NH3 -> N2 + 3 H2']))
rs3 = rs1 + rs2
assert rs1 == rs1
assert rs1 != rs2
assert rs3 != rs1
assert len(rs1.rxns) == 2 and len(rs2.rxns) == 1 and len(rs3.rxns) == 3
for k in 'H2O2 O2 H2O H2 NH3 N2'.split():
assert k in rs3.substances
rs1 += rs2
assert len(rs1.rxns) == 3 and len(rs2.rxns) == 1
assert rs1 == rs3
rs4 = ReactionSystem.from_string("H2O -> H+ + OH-; 1e-4")
rs4 += [Reaction({'H+', 'OH-'}, {'H2O'}, 1e10)]
res = rs4.rates({'H2O': 1, 'H+': 1e-7, 'OH-': 1e-7})
for k in 'H2O H+ OH-'.split():
assert abs(res[k]) < 1e-16
rs5 = ReactionSystem.from_string("H3O+ -> H+ + H2O")
rs6 = rs4 + rs5
rs7 = rs6 + (Reaction.from_string("H+ + H2O -> H3O+"),)
assert len(rs7.rxns) == 4
rs1 = ReactionSystem.from_string('O2 + H2 -> H2O2')
rs1.substances['H2O2'].data['D'] = 123
rs2 = ReactionSystem.from_string('H2O2 -> 2 OH')
rs2.substances['H2O2'].data['D'] = 456
rs2.substances['OH'].data['D'] = 789
rs3 = rs2 + rs1
assert (rs3.substances['H2O2'].data['D'] == 123 and rs3.substances['OH'].data['D'] == 789)
rs2 += rs1
assert (rs2.substances['H2O2'].data['D'] == 123 and rs2.substances['OH'].data['D'] == 789)
@requires(parsing_library)
def test_ReactionSystem__from_string():
rs = ReactionSystem.from_string('-> H + OH; Radiolytic(2.1e-7)', checks=())
assert rs.rxns[0].reac == {}
assert rs.rxns[0].prod == {'H': 1, 'OH': 1}
assert rs.rxns[0].param.args == [2.1e-7]
ref = 2.1e-7 * 0.15 * 998
assert rs.rates({'doserate': .15, 'density': 998}) == {'H': ref, 'OH': ref}
r2, = ReactionSystem.from_string("H2O + H2O + H+ -> H3O+ + H2O").rxns
assert r2.reac == {'H2O': 2, 'H+': 1}
assert r2.prod == {'H2O': 1, 'H3O+': 1}
@requires(parsing_library, units_library)
def test_ReactionSystem__from_string__units():
r3, = ReactionSystem.from_string('(H2O) -> e-(aq) + H+ + OH; Radiolytic(2.1e-7*mol/J)').rxns
assert len(r3.reac) == 0 and r3.inact_reac == {'H2O': 1}
assert r3.prod == {'e-(aq)': 1, 'H+': 1, 'OH': 1}
from chempy.kinetics.rates import Radiolytic
mol, J = default_units.mol, default_units.J
assert r3.param == Radiolytic(2.1e-7*mol/J)
assert r3.param != Radiolytic(2.0e-7*mol/J)
assert r3.param != Radiolytic(2.1e-7)
assert r3.order() == 0
k = 1e-4/default_units.second
rs = ReactionSystem.from_string("""
H2O -> H+ + OH-; {}
""".format(repr(k)))
assert allclose(rs.rxns[0].param, k)
def test_ReactionSystem__from_string___special_naming():
rs = ReactionSystem.from_string("""
H2O* + H2O -> 2 H2O
H2O* -> OH + H
""") # excited water
for sk in 'H2O* H2O OH H'.split():
assert sk in rs.substances
assert rs.substances['H2O*'].composition == {1: 2, 8: 1}
assert rs.categorize_substances() == dict(accumulated={'OH', 'H', 'H2O'}, depleted={'H2O*'},
unaffected=set(), nonparticipating=set())
@requires(parsing_library)
def test_ReactionSystem__from_string__string_rate_const():
rsys = ReactionSystem.from_string("H+ + OH- -> H2O; 'kf'")
r2, = rsys.rxns
assert r2.reac == {'OH-': 1, 'H+': 1}
assert r2.prod == {'H2O': 1}
r2str = r2.string(rsys.substances, with_param=True)
assert r2str.endswith('; kf')
@requires('numpy')
def test_ReactionSystem__upper_conc_bounds():
rs = ReactionSystem.from_string('\n'.join(['2 NH3 -> N2 + 3 H2', 'N2H4 -> N2 + 2 H2']))
c0 = {'NH3': 5, 'N2': 7, 'H2': 11, 'N2H4': 2}
    _N = 5 + 14 + 4    # N atoms: 5*1 (NH3) + 7*2 (N2) + 2*2 (N2H4)
    _H = 15 + 22 + 8   # H atoms: 5*3 (NH3) + 11*2 (H2) + 2*4 (N2H4)
ref = {
'NH3': min(_N, _H/3),
'N2': _N/2,
'H2': _H/2,
'N2H4': min(_N/2, _H/4),
}
res = rs.as_per_substance_dict(rs.upper_conc_bounds(c0))
assert res == ref
@requires('numpy')
def test_ReactionSystem__upper_conc_bounds__a_substance_no_composition():
rs = ReactionSystem.from_string("""
H2O -> e-(aq) + H2O+
H2O+ + e-(aq) -> H2O
""")
c0 = {'H2O': 55.0, 'e-(aq)': 2e-3, 'H2O+': 3e-3}
    _O = 55 + 3e-3        # O atoms: one each from H2O and H2O+
    _H = 2*55 + 2*3e-3    # H atoms: two each from H2O and H2O+
ref = {
'H2O': min(_O, _H/2),
'e-(aq)': float('inf'),
'H2O+': min(_O, _H/2),
}
res = rs.as_per_substance_dict(rs.upper_conc_bounds(c0))
assert res == ref
@requires(parsing_library)
def test_ReactionSystem__identify_equilibria():
rsys = ReactionSystem.from_string("""
2 H2 + O2 -> 2 H2O ; 1e-3
H2O -> H+ + OH- ; 1e-4/55.35
H+ + OH- -> H2O ; 1e10
2 H2O -> 2 H2 + O2
""")
assert rsys.identify_equilibria() == [(0, 3), (1, 2)]
@requires(parsing_library)
def test_ReactionSystem__categorize_substances():
rsys1 = ReactionSystem.from_string("""
2 H2 + O2 -> 2 H2O ; 1e-3
H2O -> H+ + OH- ; 1e-4/55.35
H+ + OH- -> H2O ; 1e10
2 H2O -> 2 H2 + O2
""")
assert all(not s for s in rsys1.categorize_substances().values())
rsys2 = ReactionSystem.from_string('\n'.join(['2 NH3 -> N2 + 3 H2', 'N2H4 -> N2 + 2 H2']))
assert rsys2.categorize_substances() == dict(accumulated={'N2', 'H2'}, depleted={'NH3', 'N2H4'},
unaffected=set(), nonparticipating=set())
rsys3 = ReactionSystem.from_string("H+ + OH- -> H2O; 'kf'")
assert rsys3.categorize_substances() == dict(accumulated={'H2O'}, depleted={'H+', 'OH-'},
unaffected=set(), nonparticipating=set())
rsys4 = ReactionSystem([Reaction({'H2': 2, 'O2': 1}, {'H2O': 2})], 'H2 O2 H2O N2 Ar')
assert rsys4.categorize_substances() == dict(accumulated={'H2O'}, depleted={'H2', 'O2'},
unaffected=set(), nonparticipating={'N2', 'Ar'})
rsys5 = ReactionSystem.from_string("""
A -> B; MassAction(unique_keys=('k1',))
B + C -> A + C; MassAction(unique_keys=('k2',))
2 B -> B + C; MassAction(unique_keys=('k3',))
""", substance_factory=lambda formula: Substance(formula))
assert rsys5.categorize_substances() == dict(accumulated={'C'}, depleted=set(),
unaffected=set(), nonparticipating=set())
rsys6 = ReactionSystem.from_string("""H2O2 + Fe+3 + (H2O2) -> 2 H2O + O2 + Fe+3""")
    assert rsys6.rxns[0].order() == 2  # the additional H2O2 within parentheses is an inactive reactant
assert rsys6.categorize_substances() == dict(accumulated={'H2O', 'O2'}, depleted={'H2O2'},
unaffected={'Fe+3'}, nonparticipating=set())
@requires(parsing_library)
def test_ReactionSystem__split():
a = """
2 H2 + O2 -> 2 H2O ; 1e-3
H2O -> H+ + OH- ; 1e-4/55.35
H+ + OH- -> H2O ; 1e10
2 H2O -> 2 H2 + O2"""
b = """
2 N -> N2"""
c = """
2 ClBr -> Cl2 + Br2
"""
rsys1 = ReactionSystem.from_string(a+b+c)
res = rsys1.split()
ref = list(map(ReactionSystem.from_string, [a, b, c]))
for rs in chain(res, ref):
rs.sort_substances_inplace()
res1a, res1b, res1c = res
ref1a, ref1b, ref1c = ref
assert res1a == ref1a
assert res1b == ref1b
assert res1c == ref1c
assert res1c != ref1a
assert rsys1.categorize_substances() == dict(
accumulated={'N2', 'Cl2', 'Br2'}, depleted={'N', 'ClBr'},
unaffected=set(), nonparticipating=set())
def test_ReactionSystem__subset():
r1 = Reaction({'NH3': 2}, {'N2': 1, 'H2': 3})
r2 = Reaction({'N2H4': 1}, {'N2': 1, 'H2': 2})
rs1 = ReactionSystem([r1, r2])
rs2 = rs1.subset(lambda r: 'N2H4' in r.keys())
assert len(rs1.rxns) == 2 and len(rs2.rxns) == 1
assert rs2 == ReactionSystem([r2])
| bsd-2-clause |
ianmiell/docker-selinux | docker_selinux.py | 1 | 6584 | """ShutIt module. See http://shutit.tk
"""
from shutit_module import ShutItModule
class docker_selinux(ShutItModule):
def build(self, shutit):
        # Some useful API calls for reference; see shutit's docs for more info and options:
# shutit.send(send) - send a command
# shutit.multisend(send,send_dict) - send a command, dict contains {expect1:response1,expect2:response2,...}
# shutit.log(msg) - send a message to the log
# shutit.run_script(script) - run the passed-in string as a script
# shutit.send_file(path, contents) - send file to path on target with given contents as a string
# shutit.send_host_file(path, hostfilepath) - send file from host machine to path on the target
# shutit.send_host_dir(path, hostfilepath) - send directory and contents to path on the target
# shutit.host_file_exists(filename, directory=False) - returns True if file exists on host
# shutit.file_exists(filename, directory=False) - returns True if file exists on target
# shutit.add_to_bashrc(line) - add a line to bashrc
# shutit.get_url(filename, locations) - get a file via url from locations specified in a list
# shutit.user_exists(user) - returns True if the user exists on the target
# shutit.package_installed(package) - returns True if the package exists on the target
# shutit.pause_point(msg='') - give control of the terminal to the user
# shutit.step_through(msg='') - give control to the user and allow them to step through commands
# shutit.send_and_get_output(send) - returns the output of the sent command
# shutit.send_and_match_output(send, matches) - returns True if any lines in output match any of
# the regexp strings in the matches list
# shutit.install(package) - install a package
# shutit.remove(package) - remove a package
# shutit.login(user='root', command='su -') - log user in with given command, and set up prompt and expects
# shutit.logout() - clean up from a login
# shutit.set_password(password, user='') - set password for a given user on target
# shutit.get_config(module_id,option,default=None,boolean=False) - get configuration value
# shutit.get_ip_address() - returns the ip address of the target
# shutit.add_line_to_file(line, filename) - add line (or lines in an array) to the filename
vagrant_dir = shutit.cfg[self.module_id]['vagrant_dir']
setenforce = shutit.cfg[self.module_id]['setenforce']
compile_policy = shutit.cfg[self.module_id]['compile_policy']
shutit.install('linux-generic linux-image-generic linux-headers-generic linux-signed-generic')
shutit.install('virtualbox')
shutit.send('wget -qO- https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.deb > /tmp/vagrant.deb')
shutit.send('dpkg -i /tmp/vagrant.deb')
shutit.send('rm /tmp/vagrant.deb')
shutit.send('mkdir -p ' + vagrant_dir)
shutit.send('cd ' + vagrant_dir)
        # Remove any pre-existing Vagrantfile so we always start from a known state.
shutit.send('rm -f Vagrantfile')
shutit.send(r'''cat > Vagrantfile << END
Vagrant.configure(2) do |config|
config.vm.box = "jdiprizio/centos-docker-io"
config.vm.provider "virtualbox" do |vb|
vb.memory = "1024"
end
end
END''')
# Query the status - if it's powered off or not created, bring it up.
if shutit.send_and_match_output('vagrant status',['.*poweroff.*','.*not created.*','.*aborted.*']):
shutit.send('vagrant up')
# It should be up now, ssh into it and get root.
shutit.login(command='vagrant ssh')
shutit.login(command='sudo su')
# Ensure required software's installed.
shutit.send('yum install -y wget selinux-policy-devel')
shutit.send('rm -rf /root/selinux')
# Ensure we have the latest version of docker.
shutit.send('wget -qO- https://get.docker.com/builds/Linux/x86_64/docker-latest > docker')
shutit.send('mv -f docker /usr/bin/docker')
shutit.send('chmod +x /usr/bin/docker')
        # Optionally switch SELinux between enforcing and permissive mode.
if setenforce:
shutit.send('''sed -i 's/=permissive/=enforcing/' /etc/selinux/config''')
else:
shutit.send('''sed -i 's/=enforcing/=permissive/' /etc/selinux/config''')
# Log out to ensure the prompt stack is stable.
shutit.logout()
shutit.logout(command='sudo reboot')
# Give it time...
shutit.send('sleep 30')
# Go back in.
shutit.login(command='vagrant ssh')
# Get back to root.
shutit.login(command='sudo su')
# Recycle docker service.
shutit.send('systemctl stop docker')
shutit.send('systemctl start docker')
# Remove any pre-existing containers.
shutit.send('docker rm -f selinuxdock || /bin/true')
if compile_policy:
# Ensure we've cleaned up the files we're adding here.
shutit.send('mkdir -p /root/selinux')
shutit.send('cd /root/selinux')
shutit.send('rm -rf /root/selinux/docker_apache.tc /root/selinux/script.sh /root/selinux/docker_apache.te')
shutit.add_line_to_file('''policy_module(docker_apache,1.0)
virt_sandbox_domain_template(docker_apache)
allow docker_apache_t self: capability { chown dac_override kill setgid setuid net_bind_service sys_chroot sys_nice sys_tty_config } ;
allow docker_apache_t self:tcp_socket create_stream_socket_perms;
allow docker_apache_t self:udp_socket create_socket_perms;
corenet_tcp_bind_all_nodes(docker_apache_t)
corenet_tcp_bind_http_port(docker_apache_t)
corenet_udp_bind_all_nodes(docker_apache_t)
corenet_udp_bind_http_port(docker_apache_t)
sysnet_dns_name_resolve(docker_apache_t)
'''.split('\n'),'/root/selinux/docker_apache.te')
shutit.add_line_to_file('''make -f /usr/share/selinux/devel/Makefile docker_apache.pp
semodule -i docker_apache.pp
docker run -d --name selinuxdock --security-opt label:type:docker_apache_t httpd
'''.split('\n'),'/root/selinux/script.sh')
shutit.send('chmod +x ./script.sh')
shutit.send('./script.sh')
shutit.send('sleep 2 && docker logs selinuxdock')
# Have a look at the log output.
shutit.send('grep -w denied /var/log/audit/audit.log')
shutit.pause_point('Have a shell:')
# Log out.
shutit.logout()
shutit.logout()
return True
def get_config(self, shutit):
shutit.get_config(self.module_id, 'vagrant_dir', '/tmp/vagrant_dir')
shutit.get_config(self.module_id, 'setenforce', False, boolean=True)
shutit.get_config(self.module_id, 'compile_policy', True, boolean=True)
return True
def module():
return docker_selinux(
'io.dockerinpractice.docker_selinux.docker_selinux', 1184271914.00,
description='Test of docker selinux on a vagrant box',
maintainer='[email protected]',
depends=['shutit.tk.setup']
)
| mit |
ycaihua/kbengine | kbe/src/lib/python/Lib/test/test_traceback.py | 80 | 16912 | """Test cases for traceback module"""
from io import StringIO
import sys
import unittest
import re
from test.support import run_unittest, Error, captured_output
from test.support import TESTFN, unlink, cpython_only
import traceback
class SyntaxTracebackCases(unittest.TestCase):
# For now, a very minimal set of tests. I want to be sure that
# formatting of SyntaxErrors works based on changes for 2.1.
def get_exception_format(self, func, exc):
try:
func()
except exc as value:
return traceback.format_exception_only(exc, value)
else:
raise ValueError("call did not raise exception")
def syntax_error_with_caret(self):
compile("def fact(x):\n\treturn x!\n", "?", "exec")
def syntax_error_with_caret_2(self):
compile("1 +\n", "?", "exec")
def syntax_error_bad_indentation(self):
compile("def spam():\n print(1)\n print(2)", "?", "exec")
def syntax_error_with_caret_non_ascii(self):
compile('Python = "\u1e54\xfd\u0163\u0125\xf2\xf1" +', "?", "exec")
def syntax_error_bad_indentation2(self):
compile(" print(2)", "?", "exec")
def test_caret(self):
err = self.get_exception_format(self.syntax_error_with_caret,
SyntaxError)
self.assertEqual(len(err), 4)
self.assertTrue(err[1].strip() == "return x!")
self.assertIn("^", err[2]) # third line has caret
self.assertEqual(err[1].find("!"), err[2].find("^")) # in the right place
err = self.get_exception_format(self.syntax_error_with_caret_2,
SyntaxError)
self.assertIn("^", err[2]) # third line has caret
self.assertEqual(err[2].count('\n'), 1) # and no additional newline
self.assertEqual(err[1].find("+"), err[2].find("^")) # in the right place
err = self.get_exception_format(self.syntax_error_with_caret_non_ascii,
SyntaxError)
self.assertIn("^", err[2]) # third line has caret
self.assertEqual(err[2].count('\n'), 1) # and no additional newline
self.assertEqual(err[1].find("+"), err[2].find("^")) # in the right place
def test_nocaret(self):
exc = SyntaxError("error", ("x.py", 23, None, "bad syntax"))
err = traceback.format_exception_only(SyntaxError, exc)
self.assertEqual(len(err), 3)
self.assertEqual(err[1].strip(), "bad syntax")
def test_bad_indentation(self):
err = self.get_exception_format(self.syntax_error_bad_indentation,
IndentationError)
self.assertEqual(len(err), 4)
self.assertEqual(err[1].strip(), "print(2)")
self.assertIn("^", err[2])
self.assertEqual(err[1].find(")"), err[2].find("^"))
err = self.get_exception_format(self.syntax_error_bad_indentation2,
IndentationError)
self.assertEqual(len(err), 4)
self.assertEqual(err[1].strip(), "print(2)")
self.assertIn("^", err[2])
self.assertEqual(err[1].find("p"), err[2].find("^"))
def test_base_exception(self):
# Test that exceptions derived from BaseException are formatted right
e = KeyboardInterrupt()
lst = traceback.format_exception_only(e.__class__, e)
self.assertEqual(lst, ['KeyboardInterrupt\n'])
def test_format_exception_only_bad__str__(self):
class X(Exception):
def __str__(self):
1/0
err = traceback.format_exception_only(X, X())
self.assertEqual(len(err), 1)
str_value = '<unprintable %s object>' % X.__name__
if X.__module__ in ('__main__', 'builtins'):
str_name = X.__name__
else:
str_name = '.'.join([X.__module__, X.__name__])
self.assertEqual(err[0], "%s: %s\n" % (str_name, str_value))
def test_without_exception(self):
err = traceback.format_exception_only(None, None)
self.assertEqual(err, ['None\n'])
def test_encoded_file(self):
# Test that tracebacks are correctly printed for encoded source files:
# - correct line number (Issue2384)
# - respect file encoding (Issue3975)
import tempfile, sys, subprocess, os
# The spawned subprocess has its stdout redirected to a PIPE, and its
# encoding may be different from the current interpreter, on Windows
# at least.
process = subprocess.Popen([sys.executable, "-c",
"import sys; print(sys.stdout.encoding)"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
output_encoding = str(stdout, 'ascii').splitlines()[0]
def do_test(firstlines, message, charset, lineno):
# Raise the message in a subprocess, and catch the output
try:
output = open(TESTFN, "w", encoding=charset)
output.write("""{0}if 1:
import traceback;
raise RuntimeError('{1}')
""".format(firstlines, message))
output.close()
process = subprocess.Popen([sys.executable, TESTFN],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
stdout = stdout.decode(output_encoding).splitlines()
finally:
unlink(TESTFN)
# The source lines are encoded with the 'backslashreplace' handler
encoded_message = message.encode(output_encoding,
'backslashreplace')
# and we just decoded them with the output_encoding.
message_ascii = encoded_message.decode(output_encoding)
err_line = "raise RuntimeError('{0}')".format(message_ascii)
err_msg = "RuntimeError: {0}".format(message_ascii)
self.assertIn(("line %s" % lineno), stdout[1],
"Invalid line number: {0!r} instead of {1}".format(
stdout[1], lineno))
self.assertTrue(stdout[2].endswith(err_line),
"Invalid traceback line: {0!r} instead of {1!r}".format(
stdout[2], err_line))
self.assertTrue(stdout[3] == err_msg,
"Invalid error message: {0!r} instead of {1!r}".format(
stdout[3], err_msg))
do_test("", "foo", "ascii", 3)
for charset in ("ascii", "iso-8859-1", "utf-8", "GBK"):
if charset == "ascii":
text = "foo"
elif charset == "GBK":
text = "\u4E02\u5100"
else:
text = "h\xe9 ho"
do_test("# coding: {0}\n".format(charset),
text, charset, 4)
do_test("#!shebang\n# coding: {0}\n".format(charset),
text, charset, 5)
do_test(" \t\f\n# coding: {0}\n".format(charset),
text, charset, 5)
# Issue #18960: coding spec should has no effect
do_test("0\n# coding: GBK\n", "h\xe9 ho", 'utf-8', 5)
class TracebackFormatTests(unittest.TestCase):
def some_exception(self):
raise KeyError('blah')
@cpython_only
def check_traceback_format(self, cleanup_func=None):
from _testcapi import traceback_print
try:
self.some_exception()
except KeyError:
type_, value, tb = sys.exc_info()
if cleanup_func is not None:
# Clear the inner frames, not this one
cleanup_func(tb.tb_next)
traceback_fmt = 'Traceback (most recent call last):\n' + \
''.join(traceback.format_tb(tb))
file_ = StringIO()
traceback_print(tb, file_)
python_fmt = file_.getvalue()
# Call all _tb and _exc functions
with captured_output("stderr") as tbstderr:
traceback.print_tb(tb)
tbfile = StringIO()
traceback.print_tb(tb, file=tbfile)
with captured_output("stderr") as excstderr:
traceback.print_exc()
excfmt = traceback.format_exc()
excfile = StringIO()
traceback.print_exc(file=excfile)
else:
raise Error("unable to create test traceback string")
# Make sure that Python and the traceback module format the same thing
self.assertEqual(traceback_fmt, python_fmt)
# Now verify the _tb func output
self.assertEqual(tbstderr.getvalue(), tbfile.getvalue())
# Now verify the _exc func output
self.assertEqual(excstderr.getvalue(), excfile.getvalue())
self.assertEqual(excfmt, excfile.getvalue())
# Make sure that the traceback is properly indented.
tb_lines = python_fmt.splitlines()
self.assertEqual(len(tb_lines), 5)
banner = tb_lines[0]
location, source_line = tb_lines[-2:]
self.assertTrue(banner.startswith('Traceback'))
self.assertTrue(location.startswith(' File'))
self.assertTrue(source_line.startswith(' raise'))
def test_traceback_format(self):
self.check_traceback_format()
def test_traceback_format_with_cleared_frames(self):
# Check that traceback formatting also works with a clear()ed frame
def cleanup_tb(tb):
tb.tb_frame.clear()
self.check_traceback_format(cleanup_tb)
def test_stack_format(self):
# Verify _stack functions. Note we have to use _getframe(1) to
# compare them without this frame appearing in the output
with captured_output("stderr") as ststderr:
traceback.print_stack(sys._getframe(1))
stfile = StringIO()
traceback.print_stack(sys._getframe(1), file=stfile)
self.assertEqual(ststderr.getvalue(), stfile.getvalue())
stfmt = traceback.format_stack(sys._getframe(1))
self.assertEqual(ststderr.getvalue(), "".join(stfmt))
cause_message = (
"\nThe above exception was the direct cause "
"of the following exception:\n\n")
context_message = (
"\nDuring handling of the above exception, "
"another exception occurred:\n\n")
boundaries = re.compile(
'(%s|%s)' % (re.escape(cause_message), re.escape(context_message)))
class BaseExceptionReportingTests:
def get_exception(self, exception_or_callable):
if isinstance(exception_or_callable, Exception):
return exception_or_callable
try:
exception_or_callable()
except Exception as e:
return e
def zero_div(self):
1/0 # In zero_div
def check_zero_div(self, msg):
lines = msg.splitlines()
self.assertTrue(lines[-3].startswith(' File'))
self.assertIn('1/0 # In zero_div', lines[-2])
self.assertTrue(lines[-1].startswith('ZeroDivisionError'), lines[-1])
def test_simple(self):
try:
1/0 # Marker
except ZeroDivisionError as _:
e = _
lines = self.get_report(e).splitlines()
self.assertEqual(len(lines), 4)
self.assertTrue(lines[0].startswith('Traceback'))
self.assertTrue(lines[1].startswith(' File'))
self.assertIn('1/0 # Marker', lines[2])
self.assertTrue(lines[3].startswith('ZeroDivisionError'))
def test_cause(self):
def inner_raise():
try:
self.zero_div()
except ZeroDivisionError as e:
raise KeyError from e
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], cause_message)
self.check_zero_div(blocks[0])
self.assertIn('inner_raise() # Marker', blocks[2])
def test_context(self):
def inner_raise():
try:
self.zero_div()
except ZeroDivisionError:
raise KeyError
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], context_message)
self.check_zero_div(blocks[0])
self.assertIn('inner_raise() # Marker', blocks[2])
def test_context_suppression(self):
try:
try:
raise Exception
except:
raise ZeroDivisionError from None
except ZeroDivisionError as _:
e = _
lines = self.get_report(e).splitlines()
self.assertEqual(len(lines), 4)
self.assertTrue(lines[0].startswith('Traceback'))
self.assertTrue(lines[1].startswith(' File'))
self.assertIn('ZeroDivisionError from None', lines[2])
self.assertTrue(lines[3].startswith('ZeroDivisionError'))
def test_cause_and_context(self):
# When both a cause and a context are set, only the cause should be
# displayed and the context should be muted.
def inner_raise():
try:
self.zero_div()
except ZeroDivisionError as _e:
e = _e
try:
xyzzy
except NameError:
raise KeyError from e
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], cause_message)
self.check_zero_div(blocks[0])
self.assertIn('inner_raise() # Marker', blocks[2])
def test_cause_recursive(self):
def inner_raise():
try:
try:
self.zero_div()
except ZeroDivisionError as e:
z = e
raise KeyError from e
except KeyError as e:
raise z from e
def outer_raise():
inner_raise() # Marker
blocks = boundaries.split(self.get_report(outer_raise))
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[1], cause_message)
# The first block is the KeyError raised from the ZeroDivisionError
self.assertIn('raise KeyError from e', blocks[0])
self.assertNotIn('1/0', blocks[0])
# The second block (apart from the boundary) is the ZeroDivisionError
# re-raised from the KeyError
self.assertIn('inner_raise() # Marker', blocks[2])
self.check_zero_div(blocks[2])
def test_syntax_error_offset_at_eol(self):
# See #10186.
def e():
raise SyntaxError('', ('', 0, 5, 'hello'))
msg = self.get_report(e).splitlines()
self.assertEqual(msg[-2], " ^")
def e():
exec("x = 5 | 4 |")
msg = self.get_report(e).splitlines()
self.assertEqual(msg[-2], ' ^')
class PyExcReportingTests(BaseExceptionReportingTests, unittest.TestCase):
#
# This checks reporting through the 'traceback' module, with both
# format_exception() and print_exception().
#
def get_report(self, e):
e = self.get_exception(e)
s = ''.join(
traceback.format_exception(type(e), e, e.__traceback__))
with captured_output("stderr") as sio:
traceback.print_exception(type(e), e, e.__traceback__)
self.assertEqual(sio.getvalue(), s)
return s
class CExcReportingTests(BaseExceptionReportingTests, unittest.TestCase):
#
# This checks built-in reporting by the interpreter.
#
@cpython_only
def get_report(self, e):
from _testcapi import exception_print
e = self.get_exception(e)
with captured_output("stderr") as s:
exception_print(e)
return s.getvalue()
class MiscTracebackCases(unittest.TestCase):
#
# Check non-printing functions in traceback module
#
def test_clear(self):
def outer():
middle()
def middle():
inner()
def inner():
i = 1
1/0
try:
outer()
except:
type_, value, tb = sys.exc_info()
# Initial assertion: there's one local in the inner frame.
inner_frame = tb.tb_next.tb_next.tb_next.tb_frame
self.assertEqual(len(inner_frame.f_locals), 1)
# Clear traceback frames
traceback.clear_frames(tb)
# Local variable dict should now be empty.
self.assertEqual(len(inner_frame.f_locals), 0)
def test_main():
run_unittest(__name__)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
kvar/ansible | lib/ansible/module_utils/ec2.py | 20 | 28571 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import traceback
from ansible.module_utils.ansible_release import __version__
from ansible.module_utils.basic import missing_required_lib, env_fallback
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.cloud import CloudRetry
from ansible.module_utils.six import string_types, binary_type, text_type
from ansible.module_utils.common.dict_transformations import (
camel_dict_to_snake_dict, snake_dict_to_camel_dict,
_camel_to_snake, _snake_to_camel,
)
BOTO_IMP_ERR = None
try:
import boto
import boto.ec2 # boto does weird import stuff
HAS_BOTO = True
except ImportError:
BOTO_IMP_ERR = traceback.format_exc()
HAS_BOTO = False
BOTO3_IMP_ERR = None
try:
import boto3
import botocore
HAS_BOTO3 = True
except Exception:
BOTO3_IMP_ERR = traceback.format_exc()
HAS_BOTO3 = False
try:
    # Although this import exists so that Python 3 can use the custom comparison as a key,
    # Python 2.7 also uses it (and it works as expected). Python 2.6 will trigger the ImportError.
from functools import cmp_to_key
PY3_COMPARISON = True
except ImportError:
PY3_COMPARISON = False
class AnsibleAWSError(Exception):
pass
def _botocore_exception_maybe():
"""
Allow for boto3 not being installed when using these utils by wrapping
botocore.exceptions instead of assigning from it directly.
"""
if HAS_BOTO3:
return botocore.exceptions.ClientError
return type(None)
class AWSRetry(CloudRetry):
base_class = _botocore_exception_maybe()
@staticmethod
def status_code_from_exception(error):
return error.response['Error']['Code']
@staticmethod
def found(response_code, catch_extra_error_codes=None):
# This list of failures is based on this API Reference
# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
#
# TooManyRequestsException comes from inside botocore when it
# does retrys, unfortunately however it does not try long
# enough to allow some services such as API Gateway to
# complete configuration. At the moment of writing there is a
# botocore/boto3 bug open to fix this.
#
# https://github.com/boto/boto3/issues/876 (and linked PRs etc)
retry_on = [
'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
'InternalFailure', 'InternalError', 'TooManyRequestsException',
'Throttling'
]
if catch_extra_error_codes:
retry_on.extend(catch_extra_error_codes)
not_found = re.compile(r'^\w+.NotFound')
return response_code in retry_on or not_found.search(response_code)
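# Illustrative sketch (not part of the upstream module): AWSRetry is meant to
# be applied as a decorator so that throttled boto3 calls are retried with
# exponential backoff. This assumes the CloudRetry base class exposes a
# backoff() classmethod, as it does in upstream Ansible; the function below is
# hypothetical.
#
#     @AWSRetry.backoff(tries=10, delay=3, backoff=1.5)
#     def describe_instances(client):
#         return client.describe_instances()
#
# A response code listed in retry_on (e.g. 'Throttling') or matching the
# '<Resource>.NotFound' pattern causes another attempt instead of a failure.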
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
try:
return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
except ValueError as e:
module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
module.fail_json(msg=to_native(e))
except botocore.exceptions.NoRegionError as e:
module.fail_json(msg="The %s module requires a region and none was found in configuration, "
"environment variables or module parameters" % module._name)
def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
profile = params.pop('profile_name', None)
if conn_type not in ['both', 'resource', 'client']:
raise ValueError('There is an issue in the calling code. You '
'must specify either both, resource, or client to '
'the conn_type parameter in the boto3_conn function '
'call')
if params.get('config'):
config = params.pop('config')
config.user_agent_extra = 'Ansible/{0}'.format(__version__)
else:
config = botocore.config.Config(
user_agent_extra='Ansible/{0}'.format(__version__),
)
session = boto3.session.Session(
profile_name=profile,
)
if conn_type == 'resource':
return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
elif conn_type == 'client':
return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
else:
client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
return client, resource
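# Illustrative sketch (not part of the upstream module): obtaining connections
# directly, e.g. from inventory code; the region and profile values below are
# placeholders.
#
#     ec2_client = _boto3_conn(conn_type='client', resource='ec2',
#                              region='us-east-1', profile_name='default')
#     client, resource = _boto3_conn(conn_type='both', resource='ec2',
#                                    region='us-east-1')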
boto3_inventory_conn = _boto3_conn
def boto_exception(err):
"""
Extracts the error message from a boto exception.
:param err: Exception from boto
:return: Error message
"""
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
else:
error = '%s: %s' % (Exception, err)
return error
def aws_common_argument_spec():
return dict(
debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'),
ec2_url=dict(),
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
validate_certs=dict(default=True, type='bool'),
security_token=dict(aliases=['access_token'], no_log=True),
profile=dict(),
)
def ec2_argument_spec():
spec = aws_common_argument_spec()
spec.update(
dict(
region=dict(aliases=['aws_region', 'ec2_region']),
)
)
return spec
def get_aws_connection_info(module, boto3=False):
# Check module args for credentials, then check environment vars
# access_key
ec2_url = module.params.get('ec2_url')
access_key = module.params.get('aws_access_key')
secret_key = module.params.get('aws_secret_key')
security_token = module.params.get('security_token')
region = module.params.get('region')
profile_name = module.params.get('profile')
validate_certs = module.params.get('validate_certs')
if not ec2_url:
if 'AWS_URL' in os.environ:
ec2_url = os.environ['AWS_URL']
elif 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL']
if not access_key:
if os.environ.get('AWS_ACCESS_KEY_ID'):
access_key = os.environ['AWS_ACCESS_KEY_ID']
elif os.environ.get('AWS_ACCESS_KEY'):
access_key = os.environ['AWS_ACCESS_KEY']
elif os.environ.get('EC2_ACCESS_KEY'):
access_key = os.environ['EC2_ACCESS_KEY']
elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'):
access_key = boto.config.get('Credentials', 'aws_access_key_id')
elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'):
access_key = boto.config.get('default', 'aws_access_key_id')
else:
# in case access_key came in as empty string
access_key = None
if not secret_key:
if os.environ.get('AWS_SECRET_ACCESS_KEY'):
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
elif os.environ.get('AWS_SECRET_KEY'):
secret_key = os.environ['AWS_SECRET_KEY']
elif os.environ.get('EC2_SECRET_KEY'):
secret_key = os.environ['EC2_SECRET_KEY']
elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'):
secret_key = boto.config.get('Credentials', 'aws_secret_access_key')
elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'):
secret_key = boto.config.get('default', 'aws_secret_access_key')
else:
# in case secret_key came in as empty string
secret_key = None
if not region:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
elif 'AWS_DEFAULT_REGION' in os.environ:
region = os.environ['AWS_DEFAULT_REGION']
elif 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
else:
if not boto3:
if HAS_BOTO:
# boto.config.get returns None if config not found
region = boto.config.get('Boto', 'aws_region')
if not region:
region = boto.config.get('Boto', 'ec2_region')
else:
module.fail_json(msg=missing_required_lib('boto'), exception=BOTO_IMP_ERR)
elif HAS_BOTO3:
# here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
try:
region = botocore.session.Session(profile=profile_name).get_config_variable('region')
except botocore.exceptions.ProfileNotFound as e:
pass
else:
module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)
if not security_token:
if os.environ.get('AWS_SECURITY_TOKEN'):
security_token = os.environ['AWS_SECURITY_TOKEN']
elif os.environ.get('AWS_SESSION_TOKEN'):
security_token = os.environ['AWS_SESSION_TOKEN']
elif os.environ.get('EC2_SECURITY_TOKEN'):
security_token = os.environ['EC2_SECURITY_TOKEN']
elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'):
security_token = boto.config.get('Credentials', 'aws_security_token')
elif HAS_BOTO and boto.config.get('default', 'aws_security_token'):
security_token = boto.config.get('default', 'aws_security_token')
else:
# in case secret_token came in as empty string
security_token = None
if HAS_BOTO3 and boto3:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=security_token)
boto_params['verify'] = validate_certs
if profile_name:
boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
boto_params['profile_name'] = profile_name
else:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
security_token=security_token)
# only set profile_name if passed as an argument
if profile_name:
boto_params['profile_name'] = profile_name
boto_params['validate_certs'] = validate_certs
for param, value in boto_params.items():
if isinstance(value, binary_type):
boto_params[param] = text_type(value, 'utf-8', 'strict')
return region, ec2_url, boto_params
def get_ec2_creds(module):
    ''' for compatibility with old modules that don't/can't yet
        use the ec2_connect method '''
region, ec2_url, boto_params = get_aws_connection_info(module)
return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
def boto_fix_security_token_in_profile(conn, profile_name):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + profile_name
if boto.config.has_option(profile, 'aws_security_token'):
conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
return conn
def connect_to_aws(aws_module, region, **params):
try:
conn = aws_module.connect_to_region(region, **params)
except(boto.provider.ProfileNotFoundError):
raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.")
if not conn:
if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
"boto or extend with endpoints_path" % (region, aws_module.__name__))
else:
raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
if params.get('profile_name'):
conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
return conn
def ec2_connect(module):
""" Return an ec2 connection"""
region, ec2_url, boto_params = get_aws_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **boto_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
module.fail_json(msg=str(e))
# Otherwise, no region so we fallback to the old connection method
elif ec2_url:
try:
ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
return ec2
def ansible_dict_to_boto3_filter_list(filters_dict):
""" Convert an Ansible dict of filters to list of dicts that boto3 can use
Args:
filters_dict (dict): Dict of AWS filters.
Basic Usage:
>>> filters = {'some-aws-id': 'i-01234567'}
>>> ansible_dict_to_boto3_filter_list(filters)
        [{'Name': 'some-aws-id', 'Values': ['i-01234567']}]
Returns:
List: List of AWS filters and their values
[
{
'Name': 'some-aws-id',
'Values': [
'i-01234567',
]
}
]
"""
filters_list = []
for k, v in filters_dict.items():
filter_dict = {'Name': k}
if isinstance(v, string_types):
filter_dict['Values'] = [v]
else:
filter_dict['Values'] = v
filters_list.append(filter_dict)
return filters_list
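# Illustrative sketch (not part of the upstream module): the returned list
# plugs straight into boto3 describe calls; the connection object below is a
# placeholder.
#
#     filters = ansible_dict_to_boto3_filter_list(
#         {'instance-state-name': ['running', 'stopped'], 'vpc-id': 'vpc-123456'})
#     reservations = connection.describe_instances(Filters=filters)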
def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
""" Convert a boto3 list of resource tags to a flat dict of key:value pairs
Args:
tags_list (list): List of dicts representing AWS tags.
tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
Basic Usage:
>>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
>>> boto3_tag_list_to_ansible_dict(tags_list)
        {'MyTagKey': 'MyTagValue'}
Returns:
Dict: Dict of key:value pairs representing AWS tags
{
'MyTagKey': 'MyTagValue',
}
"""
if tag_name_key_name and tag_value_key_name:
tag_candidates = {tag_name_key_name: tag_value_key_name}
else:
tag_candidates = {'key': 'value', 'Key': 'Value'}
if not tags_list:
return {}
for k, v in tag_candidates.items():
if k in tags_list[0] and v in tags_list[0]:
return dict((tag[k], tag[v]) for tag in tags_list)
raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
""" Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
Args:
tags_dict (dict): Dict representing AWS resource tags.
tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
Basic Usage:
>>> tags_dict = {'MyTagKey': 'MyTagValue'}
>>> ansible_dict_to_boto3_tag_list(tags_dict)
        [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
Returns:
List: List of dicts containing tag keys and values
[
{
'Key': 'MyTagKey',
'Value': 'MyTagValue'
}
]
"""
tags_list = []
for k, v in tags_dict.items():
tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)})
return tags_list
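def _tag_helpers_round_trip_example():
    # Illustrative only (not part of the upstream module): for plain string
    # tags the two helpers above are inverses of each other.
    tags = {'Name': 'web01', 'env': 'prod'}
    as_boto3 = ansible_dict_to_boto3_tag_list(tags)
    assert boto3_tag_list_to_ansible_dict(as_boto3) == tags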
def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
""" Return list of security group IDs from security group names. Note that security group names are not unique
across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
    will probably lead to a boto exception if you attempt to assign both IDs to a resource, so ensure you wrap the call
    in a try/except block
"""
def get_sg_name(sg, boto3):
if boto3:
return sg['GroupName']
else:
return sg.name
def get_sg_id(sg, boto3):
if boto3:
return sg['GroupId']
else:
return sg.id
sec_group_id_list = []
if isinstance(sec_group_list, string_types):
sec_group_list = [sec_group_list]
# Get all security groups
if boto3:
if vpc_id:
filters = [
{
'Name': 'vpc-id',
'Values': [
vpc_id,
]
}
]
all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
else:
all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
else:
if vpc_id:
filters = {'vpc-id': vpc_id}
all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
else:
all_sec_groups = ec2_connection.get_all_security_groups()
unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
sec_group_name_list = list(set(sec_group_list) - set(unmatched))
if len(unmatched) > 0:
# If we have unmatched names that look like an ID, assume they are
sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
if len(still_unmatched) > 0:
raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list]
return sec_group_id_list
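# Illustrative sketch (not part of the upstream module): resolving a mix of
# names and literal ids against a single VPC; the values below are
# placeholders.
#
#     sg_ids = get_ec2_security_group_ids_from_names(
#         ['default', 'sg-0123456789abcdef0'], ec2_client, vpc_id='vpc-123456')
#
# Unmatched names that already look like 'sg-...' ids are assumed to be ids
# and passed through, while names that match nothing raise a ValueError.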
def _hashable_policy(policy, policy_list):
"""
Takes a policy and returns a list, the contents of which are all hashable and sorted.
Example input policy:
{'Version': '2012-10-17',
'Statement': [{'Action': 's3:PutObjectAcl',
'Sid': 'AddCannedAcl2',
'Resource': 'arn:aws:s3:::test_policy/*',
'Effect': 'Allow',
'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
}]}
Returned value:
[('Statement', ((('Action', (u's3:PutObjectAcl',)),
('Effect', (u'Allow',)),
('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))),
('Version', (u'2012-10-17',)))]
"""
if isinstance(policy, list):
for each in policy:
tupleified = _hashable_policy(each, [])
if isinstance(tupleified, list):
tupleified = tuple(tupleified)
policy_list.append(tupleified)
elif isinstance(policy, string_types) or isinstance(policy, binary_type):
policy = to_text(policy)
# convert root account ARNs to just account IDs
if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
policy = policy.split(':')[4]
return [policy]
elif isinstance(policy, dict):
sorted_keys = list(policy.keys())
sorted_keys.sort()
for key in sorted_keys:
tupleified = _hashable_policy(policy[key], [])
if isinstance(tupleified, list):
tupleified = tuple(tupleified)
policy_list.append((key, tupleified))
# ensure we aren't returning deeply nested structures of length 1
if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
policy_list = policy_list[0]
if isinstance(policy_list, list):
if PY3_COMPARISON:
policy_list.sort(key=cmp_to_key(py3cmp))
else:
policy_list.sort()
return policy_list
def py3cmp(a, b):
""" Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
try:
if a > b:
return 1
elif a < b:
return -1
else:
return 0
except TypeError as e:
# check to see if they're tuple-string
# always say strings are less than tuples (to maintain compatibility with python2)
str_ind = to_text(e).find('str')
tup_ind = to_text(e).find('tuple')
if -1 not in (str_ind, tup_ind):
if str_ind < tup_ind:
return -1
elif tup_ind < str_ind:
return 1
raise
def compare_policies(current_policy, new_policy):
""" Compares the existing policy and the updated policy
Returns True if there is a difference between policies.
"""
return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
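def _compare_policies_example():
    # Illustrative only (not part of the upstream module): two policies that
    # differ just in list ordering and in how the root principal is written
    # hash to the same canonical form, so no difference is reported.
    policy_a = {'Statement': [{'Action': ['s3:GetObject', 's3:PutObject'],
                               'Effect': 'Allow',
                               'Principal': {'AWS': 'arn:aws:iam::123456789012:root'}}]}
    policy_b = {'Statement': [{'Principal': {'AWS': '123456789012'},
                               'Effect': 'Allow',
                               'Action': ['s3:PutObject', 's3:GetObject']}]}
    assert not compare_policies(policy_a, policy_b)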
def sort_json_policy_dict(policy_dict):
""" Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
different orders will return true
Args:
policy_dict (dict): Dict representing IAM JSON policy.
Basic Usage:
        >>> my_iam_policy = {'Principle': {'AWS': ["31", "7", "14", "101"]}}
>>> sort_json_policy_dict(my_iam_policy)
Returns:
Dict: Will return a copy of the policy as a Dict but any List will be sorted
{
'Principle': {
                'AWS': ['101', '14', '31', '7']  # sorted as strings
}
}
"""
def value_is_list(my_list):
checked_list = []
for item in my_list:
if isinstance(item, dict):
checked_list.append(sort_json_policy_dict(item))
elif isinstance(item, list):
checked_list.append(value_is_list(item))
else:
checked_list.append(item)
# Sort list. If it's a list of dictionaries, sort by tuple of key-value
# pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
return checked_list
ordered_policy_dict = {}
for key, value in policy_dict.items():
if isinstance(value, dict):
ordered_policy_dict[key] = sort_json_policy_dict(value)
elif isinstance(value, list):
ordered_policy_dict[key] = value_is_list(value)
else:
ordered_policy_dict[key] = value
return ordered_policy_dict
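def _sort_json_policy_dict_example():
    # Illustrative only (not part of the upstream module): list order is
    # normalised (as strings), so logically identical policies compare equal.
    a = {'Principle': {'AWS': ['31', '7', '14', '101']}}
    b = {'Principle': {'AWS': ['7', '101', '14', '31']}}
    assert sort_json_policy_dict(a) == sort_json_policy_dict(b)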
def map_complex_type(complex_type, type_map):
"""
    Allows casting elements within a dictionary to a specific type
Example of usage:
DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
'maximum_percent': 'int',
'minimum_healthy_percent': 'int'
}
deployment_configuration = map_complex_type(module.params['deployment_configuration'],
DEPLOYMENT_CONFIGURATION_TYPE_MAP)
    This ensures all values within the root element are cast to valid integers
"""
if complex_type is None:
return
new_type = type(complex_type)()
if isinstance(complex_type, dict):
for key in complex_type:
if key in type_map:
if isinstance(type_map[key], list):
new_type[key] = map_complex_type(
complex_type[key],
type_map[key][0])
else:
new_type[key] = map_complex_type(
complex_type[key],
type_map[key])
else:
return complex_type
elif isinstance(complex_type, list):
for i in range(len(complex_type)):
new_type.append(map_complex_type(
complex_type[i],
type_map))
elif type_map:
return globals()['__builtins__'][type_map](complex_type)
return new_type
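def _map_complex_type_example():
    # Illustrative only (not part of the upstream module): casting string
    # values coming from module params into the types an API expects.
    type_map = {'maximum_percent': 'int', 'minimum_healthy_percent': 'int'}
    raw = {'maximum_percent': '200', 'minimum_healthy_percent': '50'}
    assert map_complex_type(raw, type_map) == {'maximum_percent': 200,
                                               'minimum_healthy_percent': 50}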
def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
"""
Compare two dicts of AWS tags. Dicts are expected to of been created using 'boto3_tag_list_to_ansible_dict' helper function.
Two dicts are returned - the first is tags to be set, the second is any tags to remove. Since the AWS APIs differ
these may not be able to be used out of the box.
:param current_tags_dict:
:param new_tags_dict:
:param purge_tags:
:return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
:return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
"""
tag_key_value_pairs_to_set = {}
tag_keys_to_unset = []
for key in current_tags_dict.keys():
if key not in new_tags_dict and purge_tags:
tag_keys_to_unset.append(key)
for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset):
if to_text(new_tags_dict[key]) != current_tags_dict.get(key):
tag_key_value_pairs_to_set[key] = new_tags_dict[key]
return tag_key_value_pairs_to_set, tag_keys_to_unset
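def _compare_aws_tags_example():
    # Illustrative only (not part of the upstream module): 'env' changes,
    # 'owner' is purged, and the unchanged 'Name' tag is left alone.
    current = {'Name': 'web01', 'env': 'staging', 'owner': 'ops'}
    desired = {'Name': 'web01', 'env': 'prod'}
    to_set, to_unset = compare_aws_tags(current, desired, purge_tags=True)
    assert to_set == {'env': 'prod'}
    assert to_unset == ['owner']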
| gpl-3.0 |
armink/rt-thread | bsp/avr32uc3b0/rtconfig.py | 18 | 1667 | import os
# toolchains options
ARCH = 'avr32'
CPU = 'uc3'
PART = 'uc3b0256'
BOARD = 'USERBOARD'
CROSS_TOOL = 'gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'C:/Program Files/Atmel/AVR Tools/AVR Toolchain/bin'
elif CROSS_TOOL == 'keil':
print('================ERROR============================')
    print('Keil is not supported yet!')
print('=================================================')
exit(0)
elif CROSS_TOOL == 'iar':
print('================ERROR============================')
    print('IAR is not supported yet!')
print('=================================================')
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
#BUILD = 'debug'
BUILD = 'release'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'avr32-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mpart=' + PART
CFLAGS = DEVICE + ' -DBOARD=' + BOARD + ' -fmessage-length=0 -ffunction-sections -masm-addr-pseudos'
AFLAGS = ' -c -x assembler-with-cpp' + DEVICE
LFLAGS = DEVICE + ' -Wl,--gc-sections --rodata-writable -Wl,--direct-data -LSOFTWARE_FRAMEWORK/UTILS/LIBS/NEWLIB_ADDONS -T avr32elf_uc3b0256.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -g3 -Wall'
AFLAGS += ' -g3'
else:
CFLAGS += ' -O2 -Wall'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
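# Note (illustrative, not part of the original file): rt-thread's SConstruct
# imports this module to pick up the toolchain settings, roughly:
#
#     import rtconfig
#     env = Environment(CC=rtconfig.CC, CCFLAGS=rtconfig.CFLAGS,
#                       AS=rtconfig.AS, ASFLAGS=rtconfig.AFLAGS,
#                       LINK=rtconfig.LINK, LINKFLAGS=rtconfig.LFLAGS)
#
# so running `scons` in this BSP directory builds with the flags above, and
# POST_ACTION then converts the resulting ELF into rtthread.bin.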
| apache-2.0 |
azumimuo/family-xbmc-addon | plugin.video.specto/resources/lib/resolvers/cloudyvideos.py | 10 | 2259 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,time
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def resolve(url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
page = 'http://cloudyvideos.com/%s' % url
result = client.request(page, close=False)
if '>File Not Found<' in result: raise Exception()
post = {}
f = client.parseDOM(result, 'Form', attrs = {'action': ''})
k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
post = urllib.urlencode(post)
for i in range(0, 5):
try:
result = client.request(page, post=post, close=False)
url = re.compile("file *: *'(.+?)'").findall(result)
if len(url) == 0:
result = re.compile('(eval.*?\)\)\))').findall(result)
result = [i for i in result if '|download|' in i][0]
result = jsunpack.unpack(result)
url = client.parseDOM(result, 'embed', ret='src')
url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
url = [i for i in url if not i.endswith('.srt')]
url = 'http://' + url[0].split('://', 1)[-1]
return url
except:
time.sleep(1)
except:
return
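# Illustrative usage (not part of the original resolver); the file id below is
# made up:
#
#     stream_url = resolve('http://cloudyvideos.com/embed-abc123def456')
#     # -> a direct 'http://...' media url, or None if resolution failed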
| gpl-2.0 |
456838/usefulCode | YHamburgGit/freeline.py | 11 | 1696 | #!/usr/bin/python
# -*- coding:utf-8 -*-
from __future__ import print_function
import sys
from argparse import ArgumentParser
from freeline_core.dispatcher import Dispatcher
from freeline_core.init import init
class Freeline(object):
def __init__(self):
self.dispatcher = Dispatcher()
def call(self, args=None):
if 'init' in args and args.init:
print('init freeline project...')
init()
exit()
self.dispatcher.call_command(args)
def get_parser():
parser = ArgumentParser()
parser.add_argument('-v', '--version', action='store_true', help='show version')
parser.add_argument('-f', '--cleanBuild', action='store_true', help='force to execute a clean build')
parser.add_argument('-w', '--wait', action='store_true', help='make application wait for debugger')
parser.add_argument('-a', '--all', action='store_true',
help="together with '-f', freeline will force to clean build all projects.")
parser.add_argument('-c', '--clean', action='store_true', help='clean cache directory and workspace')
parser.add_argument('-d', '--debug', action='store_true', help='show freeline debug output (NOT DEBUG APPLICATION)')
# parser.add_argument('-i', '--init', action='store_true', help='init freeline project')
return parser
def main():
if sys.version_info > (3, 0):
        print('Freeline only supports Python 2.7 for now. Please run freeline with a compatible version of Python.')
exit()
parser = get_parser()
args = parser.parse_args()
freeline = Freeline()
freeline.call(args=args)
if __name__ == '__main__':
main()
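# Illustrative invocations (not part of the original script); the flags map to
# the parser defined above:
#
#     python freeline.py           # incremental build
#     python freeline.py -f -a     # force a clean build of all projects
#     python freeline.py -d        # show freeline debug output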
| apache-2.0 |
indro/t2c | libs/external_libs/Pygments-0.11.1/pygments/styles/friendly.py | 24 | 2508 | # -*- coding: utf-8 -*-
"""
pygments.styles.friendly
~~~~~~~~~~~~~~~~~~~~~~~~
A modern style based on the VIM pyte theme.
:copyright: 2006-2007 by Georg Brandl, Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class FriendlyStyle(Style):
"""
A modern style based on the VIM pyte theme.
"""
background_color = "#f0f0f0"
default_style = ""
styles = {
Whitespace: "#bbbbbb",
Comment: "italic #60a0b0",
Comment.Preproc: "noitalic #007020",
Comment.Special: "noitalic bg:#fff0f0",
Keyword: "bold #007020",
Keyword.Pseudo: "nobold",
Keyword.Type: "nobold #902000",
Operator: "#666666",
Operator.Word: "bold #007020",
Name.Builtin: "#007020",
Name.Function: "#06287e",
Name.Class: "bold #0e84b5",
Name.Namespace: "bold #0e84b5",
Name.Exception: "#007020",
Name.Variable: "#bb60d5",
Name.Constant: "#60add5",
Name.Label: "bold #002070",
Name.Entity: "bold #d55537",
Name.Attribute: "#4070a0",
Name.Tag: "bold #062873",
Name.Decorator: "bold #555555",
String: "#4070a0",
String.Doc: "italic",
String.Interpol: "italic #70a0d0",
String.Escape: "bold #4070a0",
String.Regex: "#235388",
String.Symbol: "#517918",
String.Other: "#c65d09",
Number: "#40a070",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#A00000",
Generic.Inserted: "#00A000",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #c65d09",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
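if __name__ == '__main__':
    # Illustrative demo (not part of the original module): render a snippet
    # with this style through the public pygments API.
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    print(highlight('print "hello"', PythonLexer(), HtmlFormatter(style=FriendlyStyle)))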
| mit |
ahu-odoo/odoo | addons/report_intrastat/__init__.py | 377 | 1079 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_intrastat
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kuiwei/edx-platform | common/lib/xmodule/xmodule/tests/test_peer_grading.py | 33 | 16061 | import unittest
import json
import logging
from mock import Mock, patch
from webob.multidict import MultiDict
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from opaque_keys.edx.locations import Location, SlashSeparatedCourseKey
from xmodule.tests import get_test_system, get_test_descriptor_system
from xmodule.tests.test_util_open_ended import DummyModulestore
from xmodule.open_ended_grading_classes.peer_grading_service import MockPeerGradingService
from xmodule.peer_grading_module import PeerGradingModule, PeerGradingDescriptor, MAX_ALLOWED_FEEDBACK_LENGTH
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
log = logging.getLogger(__name__)
class PeerGradingModuleTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an
external grading service.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingSample")
coe_location = course_id.make_usage_key("combinedopenended", "SampleQuestion")
calibrated_dict = {'location': "blah"}
coe_dict = {'location': coe_location.to_deprecated_string()}
save_dict = MultiDict({
'location': "blah",
'submission_id': 1,
'submission_key': "",
'score': 1,
'feedback': "",
'submission_flagged': False,
'answer_unknown': False,
})
save_dict.extend(('rubric_scores[]', val) for val in (0, 1))
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system
@return:
"""
self.setup_modulestore(self.course_id.course)
self.peer_grading = self.get_module_from_location(self.problem_location)
self.coe = self.get_module_from_location(self.coe_location)
def test_module_closed(self):
"""
Test if peer grading is closed
@return:
"""
closed = self.peer_grading.closed()
self.assertFalse(closed)
def test_get_html(self):
"""
Test to see if the module can be rendered
@return:
"""
_html = self.peer_grading.get_html()
def test_get_data(self):
"""
Try getting data from the external grading service
@return:
"""
success, _data = self.peer_grading.query_data_for_location(self.problem_location)
self.assertTrue(success)
def test_get_score_none(self):
"""
Test getting the score.
"""
score = self.peer_grading.get_score()
# Score should be None.
self.assertIsNone(score['score'])
def test_get_max_score(self):
"""
Test getting the max score
@return:
"""
max_score = self.peer_grading.max_score()
self.assertEquals(max_score, None)
def get_next_submission(self):
"""
Test to see if we can get the next mock submission
@return:
"""
success, _next_submission = self.peer_grading.get_next_submission({'location': 'blah'})
self.assertEqual(success, True)
def test_save_grade(self):
"""
Test if we can save the grade
@return:
"""
response = self.peer_grading.save_grade(self.save_dict)
self.assertEqual(response['success'], True)
def test_is_student_calibrated(self):
"""
Check to see if the student has calibrated yet
@return:
"""
response = self.peer_grading.is_student_calibrated(self.calibrated_dict)
self.assertTrue(response['success'])
def test_show_calibration_essay(self):
"""
Test showing the calibration essay
@return:
"""
response = self.peer_grading.show_calibration_essay(self.calibrated_dict)
self.assertTrue(response['success'])
def test_save_calibration_essay(self):
"""
Test saving the calibration essay
@return:
"""
response = self.peer_grading.save_calibration_essay(self.save_dict)
self.assertTrue(response['success'])
def test_peer_grading_problem(self):
"""
See if we can render a single problem
@return:
"""
response = self.peer_grading.peer_grading_problem(self.coe_dict)
self.assertTrue(response['success'])
def test___find_corresponding_module_for_location_exceptions(self):
"""
Unit test for the exception cases of __find_corresponding_module_for_location
Mainly for diff coverage
@return:
"""
# pylint: disable=protected-access
with self.assertRaises(ItemNotFoundError):
self.peer_grading._find_corresponding_module_for_location(
Location('org', 'course', 'run', 'category', 'name', 'revision')
)
def test_get_instance_state(self):
"""
Get the instance state dict
@return:
"""
self.peer_grading.get_instance_state()
def test_save_grade_with_long_feedback(self):
"""
        Test that save_grade() returns an error message when the feedback is too long.
"""
feedback_fragment = "This is very long feedback."
self.save_dict["feedback"] = feedback_fragment * (
(MAX_ALLOWED_FEEDBACK_LENGTH / len(feedback_fragment) + 1)
)
response = self.peer_grading.save_grade(self.save_dict)
# Should not succeed.
self.assertEqual(response['success'], False)
self.assertEqual(
response['error'],
"Feedback is too long, Max length is {0} characters.".format(
MAX_ALLOWED_FEEDBACK_LENGTH
)
)
def test_get_score_success_fails(self):
"""
        Test that if query_data_for_location does not succeed, the score is None.
"""
score_dict = self.get_score(False, 0, 0)
# Score dict should be None.
self.assertIsNone(score_dict)
def test_get_score(self):
"""
        Test that if the student has graded as many submissions as required,
        their score is 1.0.
"""
score_dict = self.get_score(True, 3, 3)
# Score should be 1.0.
self.assertEqual(score_dict["score"], 1.0)
# Testing score after data is stored in student_data_for_location in xmodule.
_score_dict = self.peer_grading.get_score()
# Score should be 1.0.
self.assertEqual(_score_dict["score"], 1.0)
def test_get_score_zero(self):
"""
        Test that if the student has not graded the required number of
        submissions, their score is 0.0.
"""
score_dict = self.get_score(True, 2, 3)
# Score should be 0.0.
self.assertEqual(score_dict["score"], 0.0)
def get_score(self, success, count_graded, count_required):
self.peer_grading.use_for_single_location_local = True
self.peer_grading.graded = True
# Patch for external grading service.
with patch('xmodule.peer_grading_module.PeerGradingModule.query_data_for_location') as mock_query_data_for_location:
mock_query_data_for_location.return_value = (
success,
{"count_graded": count_graded, "count_required": count_required}
)
# Returning score dict.
return self.peer_grading.get_score()
class MockPeerGradingServiceProblemList(MockPeerGradingService):
def get_problem_list(self, course_id, grader_id):
return {'success': True,
'problem_list': [
{
"num_graded": 3,
"num_pending": 681,
"num_required": 3,
"location": course_id.make_usage_key('combinedopenended', 'SampleQuestion'),
"problem_name": "Peer-Graded Essay"
},
]}
class PeerGradingModuleScoredTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an
external grading service.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingScored")
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system
@return:
"""
self.setup_modulestore(self.course_id.course)
def test_metadata_load(self):
peer_grading = self.get_module_from_location(self.problem_location)
self.assertFalse(peer_grading.closed())
def test_problem_list(self):
"""
Test to see if a peer grading problem list can be correctly initialized.
"""
# Initialize peer grading module.
peer_grading = self.get_module_from_location(self.problem_location)
# Ensure that it cannot find any peer grading.
html = peer_grading.peer_grading()
self.assertNotIn("Peer-Graded", html)
# Swap for our mock class, which will find peer grading.
peer_grading.peer_gs = MockPeerGradingServiceProblemList()
html = peer_grading.peer_grading()
self.assertIn("Peer-Graded", html)
class PeerGradingModuleLinkedTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading that is linked to an open ended module.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingLinked")
coe_location = course_id.make_usage_key("combinedopenended", "SampleQuestion")
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system.
"""
self.setup_modulestore(self.course_id.course)
@property
def field_data(self):
"""
Setup the proper field data for a peer grading module.
"""
return DictFieldData({
'data': '<peergrading/>',
'location': self.problem_location,
'use_for_single_location': True,
'link_to_location': self.coe_location.to_deprecated_string(),
'graded': True,
})
@property
def scope_ids(self):
"""
Return the proper scope ids for the peer grading module.
"""
return ScopeIds(None, None, self.problem_location, self.problem_location)
def _create_peer_grading_descriptor_with_linked_problem(self):
# Initialize the peer grading module.
system = get_test_descriptor_system()
return system.construct_xblock_from_class(
PeerGradingDescriptor,
field_data=self.field_data,
scope_ids=self.scope_ids
)
def _create_peer_grading_with_linked_problem(self, location, valid_linked_descriptor=True):
"""
Create a peer grading problem with a linked location.
"""
# Mock the linked problem descriptor.
linked_descriptor = Mock()
linked_descriptor.location = location
# Mock the peer grading descriptor.
pg_descriptor = Mock()
pg_descriptor.location = self.problem_location
if valid_linked_descriptor:
pg_descriptor.get_required_module_descriptors = lambda: [linked_descriptor, ]
else:
pg_descriptor.get_required_module_descriptors = lambda: []
test_system = self.get_module_system(pg_descriptor)
# Initialize the peer grading module.
peer_grading = PeerGradingModule(
pg_descriptor,
test_system,
self.field_data,
self.scope_ids,
)
return peer_grading
def _get_descriptor_with_invalid_link(self, exception_to_raise):
"""
Ensure that a peer grading descriptor with an invalid link will return an empty list.
"""
# Create a descriptor, and make loading an item throw an error.
descriptor = self._create_peer_grading_descriptor_with_linked_problem()
descriptor.system.load_item = Mock(side_effect=exception_to_raise)
# Ensure that modules is a list of length 0.
modules = descriptor.get_required_module_descriptors()
self.assertIsInstance(modules, list)
self.assertEqual(len(modules), 0)
def test_descriptor_with_nopath(self):
"""
Test to see if a descriptor with a NoPathToItem error when trying to get
its linked module behaves properly.
"""
self._get_descriptor_with_invalid_link(NoPathToItem)
def test_descriptor_with_item_not_found(self):
"""
Test to see if a descriptor with an ItemNotFound error when trying to get
its linked module behaves properly.
"""
self._get_descriptor_with_invalid_link(ItemNotFoundError)
def test_invalid_link(self):
"""
Ensure that a peer grading problem with no linked locations stays in panel mode.
"""
# Setup the peer grading module with no linked locations.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location, valid_linked_descriptor=False)
self.assertFalse(peer_grading.use_for_single_location_local)
self.assertTrue(peer_grading.use_for_single_location)
def test_linked_problem(self):
"""
Ensure that a peer grading problem with a linked location loads properly.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
# Ensure that it is properly setup.
self.assertTrue(peer_grading.use_for_single_location)
def test_linked_ajax(self):
"""
Ensure that a peer grading problem with a linked location responds to ajax calls.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
# If we specify a location, it will render the problem for that location.
data = peer_grading.handle_ajax('problem', {'location': self.coe_location.to_deprecated_string()})
self.assertTrue(json.loads(data)['success'])
# If we don't specify a location, it should use the linked location.
data = peer_grading.handle_ajax('problem', {})
self.assertTrue(json.loads(data)['success'])
def test_linked_score(self):
"""
Ensure that a peer grading problem with a linked location is properly scored.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
score_dict = peer_grading.get_score()
self.assertEqual(score_dict['score'], 1)
self.assertEqual(score_dict['total'], 1)
def test_get_next_submission(self):
"""
Ensure that a peer grading problem with a linked location can get a submission to score.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
data = peer_grading.handle_ajax('get_next_submission', {'location': self.coe_location})
self.assertEqual(json.loads(data)['submission_id'], 1)
| agpl-3.0 |
chrisnatali/networkx | networkx/drawing/tests/test_layout.py | 43 | 1870 | """Unit tests for layout functions."""
import sys
from nose import SkipTest
from nose.tools import assert_equal
import networkx as nx
class TestLayout(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global numpy
try:
import numpy
except ImportError:
raise SkipTest('numpy not available.')
def setUp(self):
self.Gi=nx.grid_2d_graph(5,5)
self.Gs=nx.Graph()
self.Gs.add_path('abcdef')
self.bigG=nx.grid_2d_graph(25,25) #bigger than 500 nodes for sparse
def test_smoke_int(self):
G=self.Gi
vpos=nx.random_layout(G)
vpos=nx.circular_layout(G)
vpos=nx.spring_layout(G)
vpos=nx.fruchterman_reingold_layout(G)
vpos=nx.spectral_layout(G)
vpos=nx.spectral_layout(self.bigG)
vpos=nx.shell_layout(G)
def test_smoke_string(self):
G=self.Gs
vpos=nx.random_layout(G)
vpos=nx.circular_layout(G)
vpos=nx.spring_layout(G)
vpos=nx.fruchterman_reingold_layout(G)
vpos=nx.spectral_layout(G)
vpos=nx.shell_layout(G)
def test_adjacency_interface_numpy(self):
A=nx.to_numpy_matrix(self.Gs)
pos=nx.drawing.layout._fruchterman_reingold(A)
pos=nx.drawing.layout._fruchterman_reingold(A,dim=3)
assert_equal(pos.shape,(6,3))
def test_adjacency_interface_scipy(self):
try:
import scipy
except ImportError:
raise SkipTest('scipy not available.')
A=nx.to_scipy_sparse_matrix(self.Gs,dtype='d')
pos=nx.drawing.layout._sparse_fruchterman_reingold(A)
pos=nx.drawing.layout._sparse_spectral(A)
pos=nx.drawing.layout._sparse_fruchterman_reingold(A,dim=3)
assert_equal(pos.shape,(6,3))
| bsd-3-clause |
AnishShah/tensorflow | tensorflow/compiler/tests/adadelta_test.py | 16 | 5553 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adadelta Optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adadelta
class AdadeltaOptimizerTest(xla_test.XLATestCase):
def testBasic(self):
num_updates = 4 # number of ADADELTA steps to perform
for dtype in self.float_types:
with self.cached_session(), self.test_scope():
for grad in [0.2, 0.1, 0.01]:
for lr in [1.0, 0.5, 0.1]:
var0_init = [1.0, 2.0]
var1_init = [3.0, 4.0]
var0 = resource_variable_ops.ResourceVariable(
var0_init, dtype=dtype)
var1 = resource_variable_ops.ResourceVariable(
var1_init, dtype=dtype)
grads = constant_op.constant([grad, grad], dtype=dtype)
accum = 0.0
accum_update = 0.0
# ADADELTA gradient optimizer
rho = 0.95
epsilon = 1e-8
adadelta_opt = adadelta.AdadeltaOptimizer(
learning_rate=lr, rho=rho, epsilon=epsilon)
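            # For reference, the manual numpy check below follows the standard
            # Adadelta equations (a sketch, mirroring the code that follows):
            #   accum        <- rho * accum        + (1 - rho) * g**2
            #   update       <- sqrt(accum_update + eps) / sqrt(accum + eps) * g
            #   accum_update <- rho * accum_update + (1 - rho) * update**2
            #   var          <- var - lr * update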
adadelta_update = adadelta_opt.apply_gradients(
zip([grads, grads], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
opt_vars = adadelta_opt.variables()
self.assertStartsWith(opt_vars[0].name, var0._shared_name)
self.assertStartsWith(opt_vars[1].name, var0._shared_name)
self.assertStartsWith(opt_vars[2].name, var1._shared_name)
self.assertStartsWith(opt_vars[3].name, var1._shared_name)
self.assertEqual(4, len(opt_vars))
# Assign slots
slot = [None] * 2
slot_update = [None] * 2
self.assertEqual(["accum", "accum_update"],
adadelta_opt.get_slot_names())
slot[0] = adadelta_opt.get_slot(var0, "accum")
self.assertEquals(slot[0].get_shape(), var0.get_shape())
self.assertFalse(slot[0] in variables.trainable_variables())
slot_update[0] = adadelta_opt.get_slot(var0, "accum_update")
self.assertEquals(slot_update[0].get_shape(), var0.get_shape())
self.assertFalse(slot_update[0] in variables.trainable_variables())
slot[1] = adadelta_opt.get_slot(var1, "accum")
self.assertEquals(slot[1].get_shape(), var1.get_shape())
self.assertFalse(slot[1] in variables.trainable_variables())
slot_update[1] = adadelta_opt.get_slot(var1, "accum_update")
self.assertEquals(slot_update[1].get_shape(), var1.get_shape())
self.assertFalse(slot_update[1] in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose(var0_init, self.evaluate(var0))
self.assertAllClose(var1_init, self.evaluate(var1))
update = [None] * num_updates
tot_update = 0
for step in range(num_updates):
# Run adadelta update for comparison
self.evaluate(adadelta_update)
              # Compute the expected Adadelta update for this step
accum = accum * rho + (grad**2) * (1 - rho)
update[step] = (
np.sqrt(accum_update + epsilon) *
(1. / np.sqrt(accum + epsilon)) * grad)
accum_update = (
accum_update * rho + (update[step]**2) * (1.0 - rho))
tot_update += update[step] * lr
# Check that the accumulators have been updated
for slot_idx in range(2):
self.assertAllCloseAccordingToType(
np.array([accum, accum], dtype=dtype),
self.evaluate(slot[slot_idx]),
rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([accum_update, accum_update], dtype=dtype),
self.evaluate(slot_update[slot_idx]),
rtol=1e-5)
# Check that the parameters have been updated
self.assertAllCloseAccordingToType(
np.array(
[var0_init[0] - tot_update, var0_init[1] - tot_update],
dtype=dtype),
self.evaluate(var0),
rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array(
[var1_init[0] - tot_update, var1_init[1] - tot_update],
dtype=dtype),
self.evaluate(var1),
rtol=1e-5)
if __name__ == "__main__":
test.main()
| apache-2.0 |
LoHChina/nova | nova/api/openstack/compute/contrib/cells.py | 31 | 13462 | # Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import strutils
from oslo_utils import timeutils
import six
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.cells import rpcapi as cells_rpcapi
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import rpc
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
authorize = extensions.extension_authorizer('compute', 'cells')
def _filter_keys(item, keys):
"""Filters all model attributes except for keys
item is a dict
"""
return {k: v for k, v in six.iteritems(item) if k in keys}
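# Illustrative example: _filter_keys({'name': 'c1', 'password': 'x'}, ['name'])
# returns {'name': 'c1'}.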
def _fixup_cell_info(cell_info, keys):
"""If the transport_url is present in the cell, derive username,
rpc_host, and rpc_port from it.
"""
if 'transport_url' not in cell_info:
return
# Disassemble the transport URL
transport_url = cell_info.pop('transport_url')
try:
transport_url = rpc.get_transport_url(transport_url)
except messaging.InvalidTransportURL:
# Just go with None's
for key in keys:
cell_info.setdefault(key, None)
return
if not transport_url.hosts:
return
transport_host = transport_url.hosts[0]
transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'}
for key in keys:
if key in cell_info:
continue
transport_field = transport_field_map.get(key, key)
cell_info[key] = getattr(transport_host, transport_field)
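# Illustrative example (hypothetical URL): given
#   cell_info = {'transport_url': 'rabbit://bob:pw@host1:5672/'}
# and keys ['username', 'rpc_host', 'rpc_port'], the derived result is
#   {'username': 'bob', 'rpc_host': 'host1', 'rpc_port': 5672}.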
def _scrub_cell(cell, detail=False):
keys = ['name', 'username', 'rpc_host', 'rpc_port']
if detail:
keys.append('capabilities')
cell_info = _filter_keys(cell, keys + ['transport_url'])
_fixup_cell_info(cell_info, keys)
cell_info['type'] = 'parent' if cell['is_parent'] else 'child'
return cell_info
class Controller(object):
"""Controller for Cell resources."""
def __init__(self, ext_mgr):
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.ext_mgr = ext_mgr
def _get_cells(self, ctxt, req, detail=False):
"""Return all cells."""
# Ask the CellsManager for the most recent data
items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
items = common.limited(items, req)
items = [_scrub_cell(item, detail=detail) for item in items]
return dict(cells=items)
@common.check_cells_enabled
def index(self, req):
"""Return all cells in brief."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req)
@common.check_cells_enabled
def detail(self, req):
"""Return all cells in detail."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req, detail=True)
@common.check_cells_enabled
def info(self, req):
"""Return name and capabilities for this cell."""
context = req.environ['nova.context']
authorize(context)
cell_capabs = {}
my_caps = CONF.cells.capabilities
for cap in my_caps:
key, value = cap.split('=')
cell_capabs[key] = value
cell = {'name': CONF.cells.name,
'type': 'self',
'rpc_host': None,
'rpc_port': 0,
'username': None,
'capabilities': cell_capabs}
return dict(cell=cell)
@common.check_cells_enabled
def capacities(self, req, id=None):
"""Return capacities for a given cell or all cells."""
# TODO(kaushikc): return capacities as a part of cell info and
# cells detail calls in v3, along with capabilities
if not self.ext_mgr.is_loaded('os-cell-capacities'):
raise exc.HTTPNotFound()
context = req.environ['nova.context']
authorize(context)
try:
capacities = self.cells_rpcapi.get_capacities(context,
cell_name=id)
except exception.CellNotFound:
msg = (_("Cell %(id)s not found.") % {'id': id})
raise exc.HTTPNotFound(explanation=msg)
return dict(cell={"capacities": capacities})
@common.check_cells_enabled
def show(self, req, id):
"""Return data about the given cell name. 'id' is a cell name."""
context = req.environ['nova.context']
authorize(context)
try:
cell = self.cells_rpcapi.cell_get(context, id)
except exception.CellNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return dict(cell=_scrub_cell(cell))
@common.check_cells_enabled
def delete(self, req, id):
"""Delete a child or parent cell entry. 'id' is a cell name."""
context = req.environ['nova.context']
authorize(context)
authorize(context, action="delete")
# NOTE(eliqiao): back-compatible with db layer hard-code admin
# permission checks.
nova_context.require_admin_context(context)
try:
num_deleted = self.cells_rpcapi.cell_delete(context, id)
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
if num_deleted == 0:
raise exc.HTTPNotFound()
return {}
def _validate_cell_name(self, cell_name):
"""Validate cell name is not empty and doesn't contain '!' or '.'."""
if not cell_name:
msg = _("Cell name cannot be empty")
raise exc.HTTPBadRequest(explanation=msg)
if '!' in cell_name or '.' in cell_name:
msg = _("Cell name cannot contain '!' or '.'")
raise exc.HTTPBadRequest(explanation=msg)
def _validate_cell_type(self, cell_type):
"""Validate cell_type is 'parent' or 'child'."""
if cell_type not in ['parent', 'child']:
msg = _("Cell type must be 'parent' or 'child'")
raise exc.HTTPBadRequest(explanation=msg)
def _normalize_cell(self, cell, existing=None):
"""Normalize input cell data. Normalizations include:
* Converting cell['type'] to is_parent boolean.
* Merging existing transport URL with transport information.
"""
# Start with the cell type conversion
if 'type' in cell:
self._validate_cell_type(cell['type'])
cell['is_parent'] = cell['type'] == 'parent'
del cell['type']
# Avoid cell type being overwritten to 'child'
elif existing:
cell['is_parent'] = existing['is_parent']
else:
cell['is_parent'] = False
# Now we disassemble the existing transport URL...
transport_url = existing.get('transport_url') if existing else None
transport_url = rpc.get_transport_url(transport_url)
if 'rpc_virtual_host' in cell:
transport_url.virtual_host = cell.pop('rpc_virtual_host')
if not transport_url.hosts:
transport_url.hosts.append(messaging.TransportHost())
transport_host = transport_url.hosts[0]
if cell.get('rpc_port') is not None:
try:
cell['rpc_port'] = int(cell['rpc_port'])
except ValueError:
raise exc.HTTPBadRequest(
explanation=_('rpc_port must be integer'))
# Copy over the input fields
transport_field_map = {
'username': 'username',
'password': 'password',
'hostname': 'rpc_host',
'port': 'rpc_port',
}
for key, input_field in transport_field_map.items():
# Only override the value if we're given an override
if input_field in cell:
setattr(transport_host, key, cell.pop(input_field))
# Now set the transport URL
cell['transport_url'] = str(transport_url)
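        # Illustrative end-to-end example (hypothetical values): an input
        # cell dict {'type': 'parent', 'username': 'bob', 'rpc_host': 'h1'}
        # comes out with is_parent=True and a transport_url whose first host
        # carries username='bob' and hostname='h1'.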
@common.check_cells_enabled
def create(self, req, body):
"""Create a child cell entry."""
context = req.environ['nova.context']
authorize(context)
authorize(context, action="create")
# NOTE(eliqiao): back-compatible with db layer hard-code admin
# permission checks.
nova_context.require_admin_context(context)
if 'cell' not in body:
msg = _("No cell information in request")
raise exc.HTTPBadRequest(explanation=msg)
cell = body['cell']
if 'name' not in cell:
msg = _("No cell name in request")
raise exc.HTTPBadRequest(explanation=msg)
self._validate_cell_name(cell['name'])
self._normalize_cell(cell)
try:
cell = self.cells_rpcapi.cell_create(context, cell)
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(cell=_scrub_cell(cell))
@common.check_cells_enabled
def update(self, req, id, body):
"""Update a child cell entry. 'id' is the cell name to update."""
context = req.environ['nova.context']
authorize(context)
authorize(context, action="update")
# NOTE(eliqiao): back-compatible with db layer hard-code admin
# permission checks.
nova_context.require_admin_context(context)
if 'cell' not in body:
msg = _("No cell information in request")
raise exc.HTTPBadRequest(explanation=msg)
cell = body['cell']
cell.pop('id', None)
if 'name' in cell:
self._validate_cell_name(cell['name'])
try:
# NOTE(Vek): There is a race condition here if multiple
# callers are trying to update the cell
# information simultaneously. Since this
# operation is administrative in nature, and
# will be going away in the future, I don't see
# it as much of a problem...
existing = self.cells_rpcapi.cell_get(context, id)
except exception.CellNotFound:
raise exc.HTTPNotFound()
self._normalize_cell(cell, existing)
try:
cell = self.cells_rpcapi.cell_update(context, id, cell)
except exception.CellNotFound:
raise exc.HTTPNotFound()
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(cell=_scrub_cell(cell))
@common.check_cells_enabled
def sync_instances(self, req, body):
"""Tell all cells to sync instance info."""
context = req.environ['nova.context']
authorize(context)
authorize(context, action="sync_instances")
project_id = body.pop('project_id', None)
deleted = body.pop('deleted', False)
updated_since = body.pop('updated_since', None)
if body:
msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
"understood.")
raise exc.HTTPBadRequest(explanation=msg)
if isinstance(deleted, six.string_types):
try:
deleted = strutils.bool_from_string(deleted, strict=True)
except ValueError as err:
raise exc.HTTPBadRequest(explanation=six.text_type(err))
if updated_since:
try:
timeutils.parse_isotime(updated_since)
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
self.cells_rpcapi.sync_instances(context, project_id=project_id,
updated_since=updated_since, deleted=deleted)
class Cells(extensions.ExtensionDescriptor):
"""Enables cells-related functionality such as adding neighbor cells,
listing neighbor cells, and getting the capabilities of the local cell.
"""
name = "Cells"
alias = "os-cells"
namespace = "http://docs.openstack.org/compute/ext/cells/api/v1.1"
updated = "2013-05-14T00:00:00Z"
def get_resources(self):
coll_actions = {
'detail': 'GET',
'info': 'GET',
'sync_instances': 'POST',
'capacities': 'GET',
}
memb_actions = {
'capacities': 'GET',
}
res = extensions.ResourceExtension('os-cells',
Controller(self.ext_mgr), collection_actions=coll_actions,
member_actions=memb_actions)
return [res]
| apache-2.0 |
syhpoon/xyzcmd | libxyz/ui/size.py | 1 | 1143 | #-*- coding: utf8 -*
#
# Max E. Kuznecov ~syhpoon <[email protected]> 2008
#
# This file is part of XYZCommander.
# XYZCommander is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# XYZCommander is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
# You should have received a copy of the GNU Lesser Public License
# along with XYZCommander. If not, see <http://www.gnu.org/licenses/>.
class Size(object):
"""
Simple widget size wrapper
"""
def __init__(self, rows, cols):
self.rows = rows
self.cols = cols
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __str__(self):
return "<Size: %d, %d>" % (self.rows, self.cols)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __repr__(self):
return self.__str__()
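    # Example (illustrative): str(Size(24, 80)) == "<Size: 24, 80>"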
| gpl-3.0 |
junbochen/pylearn2 | pylearn2/datasets/tests/test_hdf5.py | 47 | 7240 | """
HDF5 dataset tests.
"""
import numpy as np
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.testing.datasets import (
random_dense_design_matrix,
random_one_hot_dense_design_matrix,
random_one_hot_topological_dense_design_matrix)
from pylearn2.testing.skip import skip_if_no_h5py
def test_hdf5_design_matrix():
"""Train using an HDF5 dataset."""
skip_if_no_h5py()
import h5py
# save random data to HDF5
handle, filename = tempfile.mkstemp()
dataset = random_one_hot_dense_design_matrix(np.random.RandomState(1),
num_examples=10, dim=5,
num_classes=3)
with h5py.File(filename, 'w') as f:
f.create_dataset('X', data=dataset.get_design_matrix())
f.create_dataset('y', data=dataset.get_targets())
# instantiate Train object
trainer = yaml_parse.load(design_matrix_yaml % {'filename': filename})
trainer.main_loop()
# cleanup
os.remove(filename)
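# For reference, the file written above holds two datasets (assumed layout):
# 'X', a (10, 5) float design matrix, and 'y', (10, 3) one-hot targets,
# matching num_examples=10, dim=5, num_classes=3.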
def test_hdf5_topo_view():
"""Train using an HDF5 dataset with topo_view instead of X."""
skip_if_no_h5py()
import h5py
# save random data to HDF5
handle, filename = tempfile.mkstemp()
dataset = random_one_hot_topological_dense_design_matrix(
np.random.RandomState(1), num_examples=10, shape=(2, 2), channels=3,
axes=('b', 0, 1, 'c'), num_classes=3)
with h5py.File(filename, 'w') as f:
f.create_dataset('topo_view', data=dataset.get_topological_view())
f.create_dataset('y', data=dataset.get_targets())
# instantiate Train object
trainer = yaml_parse.load(topo_view_yaml % {'filename': filename})
trainer.main_loop()
# cleanup
os.remove(filename)
def test_hdf5_convert_to_one_hot():
"""Train using an HDF5 dataset with one-hot target conversion."""
skip_if_no_h5py()
import h5py
# save random data to HDF5
handle, filename = tempfile.mkstemp()
dataset = random_dense_design_matrix(np.random.RandomState(1),
num_examples=10, dim=5, num_classes=3)
with h5py.File(filename, 'w') as f:
f.create_dataset('X', data=dataset.get_design_matrix())
f.create_dataset('y', data=dataset.get_targets())
# instantiate Train object
trainer = yaml_parse.load(convert_to_one_hot_yaml % {'filename': filename})
trainer.main_loop()
# cleanup
os.remove(filename)
def test_hdf5_load_all():
"""Train using an HDF5 dataset with all data loaded into memory."""
skip_if_no_h5py()
import h5py
# save random data to HDF5
handle, filename = tempfile.mkstemp()
dataset = random_one_hot_dense_design_matrix(np.random.RandomState(1),
num_examples=10, dim=5,
num_classes=3)
with h5py.File(filename, 'w') as f:
f.create_dataset('X', data=dataset.get_design_matrix())
f.create_dataset('y', data=dataset.get_targets())
# instantiate Train object
trainer = yaml_parse.load(load_all_yaml % {'filename': filename})
trainer.main_loop()
# cleanup
os.remove(filename)
design_matrix_yaml = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.datasets.hdf5.HDF5Dataset {
filename: %(filename)s,
X: X,
y: y,
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: .005,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 3,
irange: 0.
}
],
nvis: 5,
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
batch_size: 5,
learning_rate: .1,
monitoring_dataset:
{
'train' : *train,
},
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
}
"""
topo_view_yaml = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.datasets.hdf5.HDF5Dataset {
filename: %(filename)s,
topo_view: topo_view,
y: y,
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: .005,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 3,
irange: 0.
}
],
nvis: 12,
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
batch_size: 5,
learning_rate: .1,
monitoring_dataset:
{
'train' : *train,
},
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
}
"""
convert_to_one_hot_yaml = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.datasets.hdf5.HDF5Dataset {
filename: %(filename)s,
X: X,
y: y,
y_labels: 3
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: .005,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 3,
irange: 0.
}
],
nvis: 5,
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
batch_size: 5,
learning_rate: .1,
monitoring_dataset:
{
'train' : *train,
},
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
}
"""
load_all_yaml = """
!obj:pylearn2.train.Train {
dataset: &train !obj:pylearn2.datasets.hdf5.HDF5Dataset {
filename: %(filename)s,
X: X,
y: y,
load_all: 1,
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: .005,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 3,
irange: 0.
}
],
nvis: 5,
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
batch_size: 5,
learning_rate: .1,
monitoring_dataset:
{
'train' : *train,
},
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
}
"""
| bsd-3-clause |
yoer/hue | desktop/core/ext-py/Pygments-1.3.1/scripts/reindent.py | 194 | 9926 | #! /usr/bin/env python
# Released to the public domain, by Tim Peters, 03 October 2000.
# -B option added by Georg Brandl, 2006.
"""reindent [-d][-r][-v] [ path ... ]
-d (--dryrun) Dry run. Analyze, but don't make any changes to files.
-r (--recurse) Recurse. Search for all .py files in subdirectories too.
-B (--no-backup) Don't write .bak backup files.
-v (--verbose) Verbose. Print informative msgs; else only names of changed files.
-h (--help) Help. Print this usage information and exit.
Change Python (.py) files to use 4-space indents and no hard tab characters.
Also trim excess spaces and tabs from ends of lines, and remove empty lines
at the end of files. Also ensure the last line ends with a newline.
If no paths are given on the command line, reindent operates as a filter,
reading a single source file from standard input and writing the transformed
source to standard output. In this case, the -d, -r and -v flags are
ignored.
You can pass one or more file and/or directory paths. For a directory
path, all .py files within the directory will be examined and, if the -r
option is given, subdirectories will be searched recursively as well.
If output is not to standard output, reindent overwrites files in place,
renaming the originals with a .bak extension. If it finds nothing to
change, the file is left alone. If reindent does change a file, the changed
file is a fixed-point for future runs (i.e., running reindent on the
resulting .py file won't change it again).
The hard part of reindenting is figuring out what to do with comment
lines. So long as the input files get a clean bill of health from
tabnanny.py, reindent should do a good job.
"""
__version__ = "1"
import tokenize
import os
import sys
verbose = 0
recurse = 0
dryrun = 0
no_backup = 0
def usage(msg=None):
if msg is not None:
print >> sys.stderr, msg
print >> sys.stderr, __doc__
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
import getopt
global verbose, recurse, dryrun, no_backup
try:
opts, args = getopt.getopt(sys.argv[1:], "drvhB",
["dryrun", "recurse", "verbose", "help",
"no-backup"])
except getopt.error, msg:
usage(msg)
return
for o, a in opts:
if o in ('-d', '--dryrun'):
dryrun += 1
elif o in ('-r', '--recurse'):
recurse += 1
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-B', '--no-backup'):
no_backup += 1
elif o in ('-h', '--help'):
usage()
return
if not args:
r = Reindenter(sys.stdin)
r.run()
r.write(sys.stdout)
return
for arg in args:
check(arg)
def check(file):
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "listing directory", file
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if ((recurse and os.path.isdir(fullname) and
not os.path.islink(fullname))
or name.lower().endswith(".py")):
check(fullname)
return
if verbose:
print "checking", file, "...",
try:
f = open(file)
except IOError, msg:
errprint("%s: I/O Error: %s" % (file, str(msg)))
return
r = Reindenter(f)
f.close()
if r.run():
if verbose:
print "changed."
if dryrun:
print "But this is a dry run, so leaving it alone."
else:
print "reindented", file, (dryrun and "(dry run => not really)" or "")
if not dryrun:
if not no_backup:
bak = file + ".bak"
if os.path.exists(bak):
os.remove(bak)
os.rename(file, bak)
if verbose:
print "renamed", file, "to", bak
f = open(file, "w")
r.write(f)
f.close()
if verbose:
print "wrote new", file
else:
if verbose:
print "unchanged."
class Reindenter:
def __init__(self, f):
self.find_stmt = 1 # next token begins a fresh stmt?
self.level = 0 # current indent level
# Raw file lines.
self.raw = f.readlines()
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it's "\n".
self.lines = [line.rstrip('\n \t').expandtabs() + "\n"
for line in self.raw]
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
# List of (lineno, indentlevel) pairs, one for each stmt and
# comment line. indentlevel is -1 for comment lines, as a
# signal that tokenize doesn't know what to do about them;
# indeed, they're our headache!
self.stats = []
def run(self):
tokenize.tokenize(self.getline, self.tokeneater)
# Remove trailing empty lines.
lines = self.lines
while lines and lines[-1] == "\n":
lines.pop()
# Sentinel.
stats = self.stats
stats.append((len(lines), 0))
# Map count of leading spaces to # we want.
have2want = {}
# Program after transformation.
after = self.after = []
# Copy over initial empty lines -- there's nothing to do until
# we see a line with *something* on it.
i = stats[0][0]
after.extend(lines[1:i])
for i in range(len(stats)-1):
thisstmt, thislevel = stats[i]
nextstmt = stats[i+1][0]
have = getlspace(lines[thisstmt])
want = thislevel * 4
if want < 0:
# A comment line.
if have:
# An indented comment line. If we saw the same
# indentation before, reuse what it most recently
# mapped to.
want = have2want.get(have, -1)
if want < 0:
# Then it probably belongs to the next real stmt.
for j in xrange(i+1, len(stats)-1):
jline, jlevel = stats[j]
if jlevel >= 0:
if have == getlspace(lines[jline]):
want = jlevel * 4
break
if want < 0: # Maybe it's a hanging
# comment like this one,
# in which case we should shift it like its base
# line got shifted.
for j in xrange(i-1, -1, -1):
jline, jlevel = stats[j]
if jlevel >= 0:
want = have + getlspace(after[jline-1]) - \
getlspace(lines[jline])
break
if want < 0:
# Still no luck -- leave it alone.
want = have
else:
want = 0
assert want >= 0
have2want[have] = want
diff = want - have
if diff == 0 or have == 0:
after.extend(lines[thisstmt:nextstmt])
else:
for line in lines[thisstmt:nextstmt]:
if diff > 0:
if line == "\n":
after.append(line)
else:
after.append(" " * diff + line)
else:
remove = min(getlspace(line), -diff)
after.append(line[remove:])
return self.raw != self.after
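    # Worked example (illustrative): for the input
    #     "if x:\n\tprint x\n"
    # run() expands the tab, computes indent levels via tokenize, and
    # produces
    #     "if x:\n    print x\n"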
def write(self, f):
f.writelines(self.after)
# Line-getter for tokenize.
def getline(self):
if self.index >= len(self.lines):
line = ""
else:
line = self.lines[self.index]
self.index += 1
return line
# Line-eater for tokenize.
def tokeneater(self, type, token, (sline, scol), end, line,
INDENT=tokenize.INDENT,
DEDENT=tokenize.DEDENT,
NEWLINE=tokenize.NEWLINE,
COMMENT=tokenize.COMMENT,
NL=tokenize.NL):
if type == NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
self.find_stmt = 1
elif type == INDENT:
self.find_stmt = 1
self.level += 1
elif type == DEDENT:
self.find_stmt = 1
self.level -= 1
elif type == COMMENT:
if self.find_stmt:
self.stats.append((sline, -1))
# but we're still looking for a new stmt, so leave
# find_stmt alone
elif type == NL:
pass
elif self.find_stmt:
# This is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER.
self.find_stmt = 0
if line: # not endmarker
self.stats.append((sline, self.level))
# Count number of leading blanks.
def getlspace(line):
i, n = 0, len(line)
while i < n and line[i] == " ":
i += 1
return i
if __name__ == '__main__':
main()
| apache-2.0 |
tomzhang/googletest | test/gtest_filter_unittest.py | 2826 | 21261 | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' or 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABLED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
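# Illustrative parsing example for RunAndExtractTestList (made-up output):
#   "[----------] 2 tests from FooTest"   -> test_case = 'FooTest'
#   "[ RUN      ] FooTest.Abc"            -> appends 'FooTest.Abc'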
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
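# Sharding example (illustrative): with GTEST_TOTAL_SHARDS=3 and
# GTEST_SHARD_INDEX in {0, 1, 2}, each invocation runs a disjoint subset of
# the tests, and the union over all shard indices covers every selected test.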
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
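  # Example (illustrative): [1, 2, 3] is validly partitioned by
  # [[1, 3], [2]], but not by [[1], [2]] (3 is missing) or by
  # [[1, 2], [2, 3]] (the duplicate 2 changes the total count).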
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
      args : Arguments to pass to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABLED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
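  # Summary of the grammar exercised above (illustrative note, not part of
  # the original suite): a filter is a ':'-separated list of wildcard
  # patterns, and everything after an optional '-' is a negative pattern,
  # so 'Bar*.*-BarTest.TestOne' runs every BarTest test except
  # BarTest.TestOne.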
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
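  # For reference (not from the original file): sharding itself is driven by
  # the GTEST_TOTAL_SHARDS / GTEST_SHARD_INDEX environment variables; the
  # status file merely proves the binary honored SHARD_STATUS_FILE_ENV_VAR.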
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
Cloudef/mpv | waftools/checks/custom.py | 1 | 4397 | from waftools.inflectors import DependencyInflector
from waftools.checks.generic import *
from waflib import Utils
import os
__all__ = ["check_pthreads", "check_iconv", "check_lua", "check_oss_4front",
"check_cocoa"]
pthreads_program = load_fragment('pthreads.c')
def check_pthread_flag(ctx, dependency_identifier):
checks = [
check_cc(fragment = pthreads_program, cflags = '-pthread'),
check_cc(fragment = pthreads_program, cflags = '-pthread',
linkflags = '-pthread') ]
for fn in checks:
if fn(ctx, dependency_identifier):
return True
return False
def check_pthreads(ctx, dependency_identifier):
if ctx.dependency_satisfied('win32-internal-pthreads'):
h = ctx.path.find_node('osdep/win32/include').abspath()
        # define IN_WINPTHREAD to work around mingw stupidity (we never want it
        # to define features specific to its own pthread stuff)
ctx.env.CFLAGS += ['-isystem', h, '-I', h, '-DIN_WINPTHREAD']
return True
if check_pthread_flag(ctx, dependency_identifier):
return True
platform_cflags = {
'linux': '-D_REENTRANT',
'freebsd': '-D_THREAD_SAFE',
'netbsd': '-D_THREAD_SAFE',
'openbsd': '-D_THREAD_SAFE',
}.get(ctx.env.DEST_OS, '')
libs = ['pthreadGC2', 'pthread']
checkfn = check_cc(fragment=pthreads_program, cflags=platform_cflags)
checkfn_nocflags = check_cc(fragment=pthreads_program)
for fn in [checkfn, checkfn_nocflags]:
if check_libs(libs, fn)(ctx, dependency_identifier):
return True
return False
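# Fallback order implemented above, for reference: bundled win32 pthreads,
# then the bare '-pthread' flag, then per-platform defines combined with an
# explicit pthreadGC2/pthread library link.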
def check_iconv(ctx, dependency_identifier):
iconv_program = load_fragment('iconv.c')
libdliconv = " ".join(ctx.env.LIB_LIBDL + ['iconv'])
libs = ['iconv', libdliconv]
checkfn = check_cc(fragment=iconv_program)
return check_libs(libs, checkfn)(ctx, dependency_identifier)
def check_lua(ctx, dependency_identifier):
lua_versions = [
( '51', 'lua >= 5.1.0 lua < 5.2.0'),
( '51deb', 'lua5.1 >= 5.1.0'), # debian
( '51fbsd', 'lua-5.1 >= 5.1.0'), # FreeBSD
( '52', 'lua >= 5.2.0' ),
( '52deb', 'lua5.2 >= 5.2.0'), # debian
( '52fbsd', 'lua-5.2 >= 5.2.0'), # FreeBSD
( 'luajit', 'luajit >= 2.0.0' ),
]
if ctx.options.LUA_VER:
lua_versions = \
[lv for lv in lua_versions if lv[0] == ctx.options.LUA_VER]
for lua_version, pkgconfig_query in lua_versions:
if check_pkg_config(pkgconfig_query, uselib_store=lua_version) \
(ctx, dependency_identifier):
# XXX: this is a bit of a hack, ask waf developers if I can copy
# the uselib_store to 'lua'
ctx.mark_satisfied(lua_version)
ctx.add_optional_message(dependency_identifier,
'version found: ' + lua_version)
return True
return False
def __get_osslibdir():
cmd = ['sh', '-c', '. /etc/oss.conf && echo $OSSLIBDIR']
p = Utils.subprocess.Popen(cmd, stdin=Utils.subprocess.PIPE,
stdout=Utils.subprocess.PIPE,
stderr=Utils.subprocess.PIPE)
return p.communicate()[0].decode().rstrip()
def check_oss_4front(ctx, dependency_identifier):
oss_libdir = __get_osslibdir()
# avoid false positive from native sys/soundcard.h
if not oss_libdir:
defkey = DependencyInflector(dependency_identifier).define_key()
ctx.undefine(defkey)
return False
soundcard_h = os.path.join(oss_libdir, "include/sys/soundcard.h")
include_dir = os.path.join(oss_libdir, "include")
fn = check_cc(header_name=soundcard_h,
defines=['PATH_DEV_DSP="/dev/dsp"',
'PATH_DEV_MIXER="/dev/mixer"'],
cflags='-I{0}'.format(include_dir),
fragment=load_fragment('oss_audio.c'))
return fn(ctx, dependency_identifier)
def check_cocoa(ctx, dependency_identifier):
fn = check_cc(
fragment = load_fragment('cocoa.m'),
compile_filename = 'test.m',
framework_name = ['Cocoa', 'IOKit', 'OpenGL', 'QuartzCore'],
includes = ctx.srcnode.abspath(),
linkflags = '-fobjc-arc')
return fn(ctx, dependency_identifier)
| gpl-2.0 |
anryko/ansible | test/units/modules/network/nxos/test_nxos_ospf.py | 23 | 2029 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_ospf
from .nxos_module import TestNxosModule, set_module_args
class TestNxosOspfModule(TestNxosModule):
module = nxos_ospf
def setUp(self):
super(TestNxosOspfModule, self).setUp()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_ospf.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_ospf.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
super(TestNxosOspfModule, self).tearDown()
self.mock_load_config.stop()
self.mock_get_config.stop()
def load_fixtures(self, commands=None, device=''):
self.load_config.return_value = None
def test_nxos_ospf_present(self):
set_module_args(dict(ospf=1, state='present'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['router ospf 1'])
def test_nxos_ospf_absent(self):
set_module_args(dict(ospf=1, state='absent'))
result = self.execute_module(changed=False)
self.assertEqual(result['commands'], [])
| gpl-3.0 |
anryko/ansible | lib/ansible/modules/cloud/vultr/vultr_plan_info.py | 21 | 3763 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Yanis Guenane <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_plan_info
short_description: Gather information about the Vultr plans available.
description:
- Gather information about plans available to boot servers.
version_added: "2.9"
author: "Yanis Guenane (@Spredzy)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Gather Vultr plans information
local_action:
module: vultr_plan_info
register: result
- name: Print the gathered information
debug:
var: result.vultr_plan_info
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_plan_info:
description: Response from Vultr API
returned: success
type: complex
contains:
plan:
description: List of the plans available.
returned: success
type: list
sample: [{
"available_locations": [
1
],
"bandwidth": 40.0,
"bandwidth_gb": 40960,
"disk": 110,
"id": 118,
"name": "32768 MB RAM,110 GB SSD,40.00 TB BW",
"plan_type": "DEDICATED",
"price_per_month": 240.0,
"ram": 32768,
"vcpu_count": 8,
"windows": false
}]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrPlanInfo(Vultr):
def __init__(self, module):
super(AnsibleVultrPlanInfo, self).__init__(module, "vultr_plan_info")
self.returns = {
"VPSPLANID": dict(key='id', convert_to='int'),
"available_locations": dict(),
"bandwidth": dict(convert_to='float'),
"bandwidth_gb": dict(convert_to='int'),
"disk": dict(convert_to='int'),
"name": dict(),
"plan_type": dict(),
"price_per_month": dict(convert_to='float'),
"ram": dict(convert_to='int'),
"vcpu_count": dict(convert_to='int'),
"windows": dict(convert_to='bool')
}
def get_plans(self):
return self.api_query(path="/v1/plans/list")
def parse_plans_list(plans_list):
return [plan for id, plan in plans_list.items()]
def main():
argument_spec = vultr_argument_spec()
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
plan_info = AnsibleVultrPlanInfo(module)
result = plan_info.get_result(parse_plans_list(plan_info.get_plans()))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
kailIII/geraldo | site/newsite/django_1_0/django/dispatch/dispatcher.py | 9 | 17129 | """Multiple-producer-multiple-consumer signal-dispatching
dispatcher is the core of the PyDispatcher system,
providing the primary API and the core logic for the
system.
Module attributes of note:
Any -- Singleton used to signal either "Any Sender" or
"Any Signal". See documentation of the _Any class.
Anonymous -- Singleton used to signal "Anonymous Sender"
See documentation of the _Anonymous class.
Internal attributes:
WEAKREF_TYPES -- tuple of types/classes which represent
weak references to receivers, and thus must be de-
referenced on retrieval to retrieve the callable
object
connections -- { senderkey (id) : { signal : [receivers...]}}
senders -- { senderkey (id) : weakref(sender) }
used for cleaning up sender references on sender
deletion
sendersBack -- { receiverkey (id) : [senderkey (id)...] }
used for cleaning up receiver references on receiver
deletion, (considerably speeds up the cleanup process
vs. the original code.)
"""
import weakref
from django.dispatch import saferef, robustapply, errors
__author__ = "Patrick K. O'Brien <[email protected]>"
__cvsid__ = "$Id: dispatcher.py,v 1.9 2005/09/17 04:55:57 mcfletch Exp $"
__version__ = "$Revision: 1.9 $"[11:-2]
class _Parameter:
"""Used to represent default parameter values."""
def __repr__(self):
return self.__class__.__name__
class _Any(_Parameter):
"""Singleton used to signal either "Any Sender" or "Any Signal"
The Any object can be used with connect, disconnect,
send, or sendExact to signal that the parameter given
Any should react to all senders/signals, not just
a particular sender/signal.
"""
Any = _Any()
class _Anonymous(_Parameter):
"""Singleton used to signal "Anonymous Sender"
The Anonymous object is used to signal that the sender
of a message is not specified (as distinct from being
"any sender"). Registering callbacks for Anonymous
will only receive messages sent without senders. Sending
with anonymous will only send messages to those receivers
registered for Any or Anonymous.
Note:
The default sender for connect is Any, while the
default sender for send is Anonymous. This has
the effect that if you do not specify any senders
in either function then all messages are routed
as though there was a single sender (Anonymous)
being used everywhere.
"""
Anonymous = _Anonymous()
WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)
connections = {}
senders = {}
sendersBack = {}
def connect(receiver, signal=Any, sender=Any, weak=True):
"""Connect receiver to sender for signal
receiver -- a callable Python object which is to receive
messages/signals/events. Receivers must be hashable
objects.
if weak is True, then receiver must be weak-referencable
(more precisely saferef.safeRef() must be able to create
a reference to the receiver).
Receivers are fairly flexible in their specification,
as the machinery in the robustApply module takes care
of most of the details regarding figuring out appropriate
subsets of the sent arguments to apply to a given
receiver.
Note:
if receiver is itself a weak reference (a callable),
it will be de-referenced by the system's machinery,
so *generally* weak references are not suitable as
receivers, though some use might be found for the
facility whereby a higher-level library passes in
pre-weakrefed receiver references.
signal -- the signal to which the receiver should respond
if Any, receiver will receive any signal from the
indicated sender (which might also be Any, but is not
necessarily Any).
Otherwise must be a hashable Python object other than
None (DispatcherError raised on None).
sender -- the sender to which the receiver should respond
if Any, receiver will receive the indicated signals
from any sender.
if Anonymous, receiver will only receive indicated
signals from send/sendExact which do not specify a
sender, or specify Anonymous explicitly as the sender.
Otherwise can be any python object.
weak -- whether to use weak references to the receiver
By default, the module will attempt to use weak
references to the receiver objects. If this parameter
is false, then strong references will be used.
returns None, may raise DispatcherTypeError
"""
if signal is None:
raise errors.DispatcherTypeError(
'Signal cannot be None (receiver=%r sender=%r)' % (receiver, sender)
)
if weak:
receiver = saferef.safeRef(receiver, onDelete=_removeReceiver)
senderkey = id(sender)
signals = connections.setdefault(senderkey, {})
# Keep track of senders for cleanup.
# Is Anonymous something we want to clean up?
if sender not in (None, Anonymous, Any):
def remove(object, senderkey=senderkey):
_removeSender(senderkey=senderkey)
# Skip objects that can not be weakly referenced, which means
# they won't be automatically cleaned up, but that's too bad.
try:
weakSender = weakref.ref(sender, remove)
senders[senderkey] = weakSender
except:
pass
receiverID = id(receiver)
# get current set, remove any current references to
# this receiver in the set, including back-references
if signals.has_key(signal):
receivers = signals[signal]
_removeOldBackRefs(senderkey, signal, receiver, receivers)
else:
receivers = signals[signal] = []
try:
current = sendersBack.get(receiverID)
if current is None:
sendersBack[ receiverID ] = current = []
if senderkey not in current:
current.append(senderkey)
except:
pass
receivers.append(receiver)
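# A minimal usage sketch (illustrative; not part of the original module).
# Receivers are plain callables; robustApply only passes the named arguments
# a receiver's signature accepts.
#
#   >>> def on_ping(signal, sender):
#   ...     return 'pong'
#   >>> connect(on_ping, signal='ping', weak=False)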
def disconnect(receiver, signal=Any, sender=Any, weak=True):
"""Disconnect receiver from sender for signal
receiver -- the registered receiver to disconnect
signal -- the registered signal to disconnect
sender -- the registered sender to disconnect
weak -- the weakref state to disconnect
disconnect reverses the process of connect,
the semantics for the individual elements are
logically equivalent to a tuple of
(receiver, signal, sender, weak) used as a key
to be deleted from the internal routing tables.
(The actual process is slightly more complex
but the semantics are basically the same).
Note:
Using disconnect is not required to cleanup
routing when an object is deleted, the framework
will remove routes for deleted objects
automatically. It's only necessary to disconnect
if you want to stop routing to a live object.
returns None, may raise DispatcherTypeError or
DispatcherKeyError
"""
if signal is None:
raise errors.DispatcherTypeError(
'Signal cannot be None (receiver=%r sender=%r)' % (receiver, sender)
)
if weak: receiver = saferef.safeRef(receiver)
senderkey = id(sender)
try:
signals = connections[senderkey]
receivers = signals[signal]
except KeyError:
raise errors.DispatcherKeyError(
"""No receivers found for signal %r from sender %r""" %(
signal,
sender
)
)
try:
# also removes from receivers
_removeOldBackRefs(senderkey, signal, receiver, receivers)
except ValueError:
raise errors.DispatcherKeyError(
"""No connection to receiver %s for signal %s from sender %s""" %(
receiver,
signal,
sender
)
)
_cleanupConnections(senderkey, signal)
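# Counterpart to the sketch after connect() (illustrative): disconnect must
# be called with the same receiver/signal/sender/weak combination that was
# used at registration time.
#
#   >>> disconnect(on_ping, signal='ping', weak=False)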
def getReceivers(sender=Any, signal=Any):
"""Get list of receivers from global tables
This utility function allows you to retrieve the
raw list of receivers from the connections table
for the given sender and signal pair.
Note:
there is no guarantee that this is the actual list
stored in the connections table, so the value
should be treated as a simple iterable/truth value
rather than, for instance a list to which you
might append new records.
Normally you would use liveReceivers(getReceivers(...))
to retrieve the actual receiver objects as an iterable
object.
"""
existing = connections.get(id(sender))
if existing is not None:
return existing.get(signal, [])
return []
def liveReceivers(receivers):
"""Filter sequence of receivers to get resolved, live receivers
This is a generator which will iterate over
the passed sequence, checking for weak references
and resolving them, then returning all live
receivers.
"""
for receiver in receivers:
if isinstance(receiver, WEAKREF_TYPES):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
yield receiver
else:
yield receiver
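# Typical pattern, following the getReceivers() docstring (sketch only):
#
#   for receiver in liveReceivers(getReceivers(sender, signal)):
#       receiver(signal=signal, sender=sender)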
def getAllReceivers(sender=Any, signal=Any):
"""Get list of all receivers from global tables
This gets all dereferenced receivers which should receive
the given signal from sender, each receiver should
be produced only once by the resulting generator
"""
receivers = {}
# Get receivers that receive *this* signal from *this* sender.
# Add receivers that receive *any* signal from *this* sender.
# Add receivers that receive *this* signal from *any* sender.
# Add receivers that receive *any* signal from *any* sender.
l = []
i = id(sender)
if i in connections:
sender_receivers = connections[i]
if signal in sender_receivers:
l.extend(sender_receivers[signal])
if signal is not Any and Any in sender_receivers:
l.extend(sender_receivers[Any])
if sender is not Any:
i = id(Any)
if i in connections:
sender_receivers = connections[i]
if sender_receivers is not None:
if signal in sender_receivers:
l.extend(sender_receivers[signal])
if signal is not Any and Any in sender_receivers:
l.extend(sender_receivers[Any])
for receiver in l:
try:
if not receiver in receivers:
if isinstance(receiver, WEAKREF_TYPES):
receiver = receiver()
# this should only (rough guess) be possible if somehow, deref'ing
# triggered a wipe.
if receiver is None:
continue
receivers[receiver] = 1
yield receiver
except TypeError:
# dead weakrefs raise TypeError on hash...
pass
def send(signal=Any, sender=Anonymous, *arguments, **named):
"""Send signal from sender to all connected receivers.
signal -- (hashable) signal value, see connect for details
sender -- the sender of the signal
if Any, only receivers registered for Any will receive
the message.
if Anonymous, only receivers registered to receive
messages from Anonymous or Any will receive the message
Otherwise can be any python object (normally one
registered with a connect if you actually want
something to occur).
arguments -- positional arguments which will be passed to
*all* receivers. Note that this may raise TypeErrors
if the receivers do not allow the particular arguments.
Note also that arguments are applied before named
arguments, so they should be used with care.
named -- named arguments which will be filtered according
to the parameters of the receivers to only provide those
acceptable to the receiver.
Return a list of tuple pairs [(receiver, response), ... ]
if any receiver raises an error, the error propagates back
through send, terminating the dispatch loop, so it is quite
    possible to not have all receivers called if a receiver raises an
error.
"""
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
responses = []
for receiver in getAllReceivers(sender, signal):
response = robustapply.robustApply(
receiver,
signal=signal,
sender=sender,
*arguments,
**named
)
responses.append((receiver, response))
return responses
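# Continuing the connect() sketch above (illustrative): dispatching returns
# one (receiver, response) pair per matching live receiver.
#
#   >>> send(signal='ping')
#   [(<function on_ping at 0x...>, 'pong')]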
def sendExact(signal=Any, sender=Anonymous, *arguments, **named ):
"""Send signal only to those receivers registered for exact message
sendExact allows for avoiding Any/Anonymous registered
handlers, sending only to those receivers explicitly
registered for a particular signal on a particular
sender.
"""
responses = []
for receiver in liveReceivers(getReceivers(sender, signal)):
response = robustapply.robustApply(
receiver,
signal=signal,
sender=sender,
*arguments,
**named
)
responses.append((receiver, response))
return responses
def _removeReceiver(receiver):
"""Remove receiver from connections."""
if not sendersBack:
# During module cleanup the mapping will be replaced with None
return False
backKey = id(receiver)
for senderkey in sendersBack.get(backKey,()):
try:
signals = connections[senderkey].keys()
except KeyError,err:
pass
else:
for signal in signals:
try:
receivers = connections[senderkey][signal]
except KeyError:
pass
else:
try:
receivers.remove(receiver)
except Exception, err:
pass
_cleanupConnections(senderkey, signal)
try:
del sendersBack[ backKey ]
except KeyError:
pass
def _cleanupConnections(senderkey, signal):
"""Delete any empty signals for senderkey. Delete senderkey if empty."""
try:
receivers = connections[senderkey][signal]
except:
pass
else:
if not receivers:
# No more connected receivers. Therefore, remove the signal.
try:
signals = connections[senderkey]
except KeyError:
pass
else:
del signals[signal]
if not signals:
# No more signal connections. Therefore, remove the sender.
_removeSender(senderkey)
def _removeSender(senderkey):
"""Remove senderkey from connections."""
_removeBackrefs(senderkey)
connections.pop(senderkey, None)
senders.pop(senderkey, None)
def _removeBackrefs(senderkey):
"""Remove all back-references to this senderkey"""
for receiver_list in connections.pop(senderkey, {}).values():
for receiver in receiver_list:
_killBackref(receiver, senderkey)
def _removeOldBackRefs(senderkey, signal, receiver, receivers):
"""Kill old sendersBack references from receiver
This guards against multiple registration of the same
receiver for a given signal and sender leaking memory
as old back reference records build up.
Also removes old receiver instance from receivers
"""
try:
index = receivers.index(receiver)
# need to scan back references here and remove senderkey
except ValueError:
return False
else:
oldReceiver = receivers[index]
del receivers[index]
found = 0
        signals = connections.get(senderkey)
        if signals is not None:
            for sig, recs in signals.iteritems():
if sig != signal:
for rec in recs:
if rec is oldReceiver:
found = 1
break
if not found:
_killBackref(oldReceiver, senderkey)
return True
return False
def _killBackref(receiver, senderkey):
"""Do the actual removal of back reference from receiver to senderkey"""
receiverkey = id(receiver)
receivers_list = sendersBack.get(receiverkey, ())
while senderkey in receivers_list:
try:
receivers_list.remove(senderkey)
except:
break
if not receivers_list:
try:
del sendersBack[ receiverkey ]
except KeyError:
pass
return True
| lgpl-3.0 |
Dunkas12/BeepBoopBot | lib/youtube_dl/extractor/tubitv.py | 32 | 3025 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
sanitized_Request,
urlencode_postdata,
)
class TubiTvIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tubitv\.com/video/(?P<id>[0-9]+)'
_LOGIN_URL = 'http://tubitv.com/login'
_NETRC_MACHINE = 'tubitv'
_GEO_COUNTRIES = ['US']
_TEST = {
'url': 'http://tubitv.com/video/283829/the_comedian_at_the_friday',
'md5': '43ac06be9326f41912dc64ccf7a80320',
'info_dict': {
'id': '283829',
'ext': 'mp4',
'title': 'The Comedian at The Friday',
'description': 'A stand up comedian is forced to look at the decisions in his life while on a one week trip to the west coast.',
'uploader_id': 'bc168bee0d18dd1cb3b86c68706ab434',
},
}
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
self.report_login()
form_data = {
'username': username,
'password': password,
}
payload = urlencode_postdata(form_data)
request = sanitized_Request(self._LOGIN_URL, payload)
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
login_page = self._download_webpage(
request, None, False, 'Wrong login info')
if not re.search(r'id="tubi-logout"', login_page):
raise ExtractorError(
'Login failed (invalid username/password)', expected=True)
def _real_initialize(self):
self._login()
def _real_extract(self, url):
video_id = self._match_id(url)
video_data = self._download_json(
'http://tubitv.com/oz/videos/%s/content' % video_id, video_id)
title = video_data['title']
formats = self._extract_m3u8_formats(
self._proto_relative_url(video_data['url']),
video_id, 'mp4', 'm3u8_native')
self._sort_formats(formats)
thumbnails = []
for thumbnail_url in video_data.get('thumbnails', []):
if not thumbnail_url:
continue
thumbnails.append({
'url': self._proto_relative_url(thumbnail_url),
})
subtitles = {}
for sub in video_data.get('subtitles', []):
sub_url = sub.get('url')
if not sub_url:
continue
subtitles.setdefault(sub.get('lang', 'English'), []).append({
'url': self._proto_relative_url(sub_url),
})
return {
'id': video_id,
'title': title,
'formats': formats,
'subtitles': subtitles,
'thumbnails': thumbnails,
'description': video_data.get('description'),
'duration': int_or_none(video_data.get('duration')),
'uploader_id': video_data.get('publisher_id'),
}
| gpl-3.0 |
rmariano/pywars | game/views.py | 2 | 7077 | import json
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import cache_page
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from .forms import BotBufferForm
from models import Challenge, Bot, UserProfile
from game.tasks import validate_bot
def index(request, match_id=None):
return render(request, 'home.html', {'tab': 'arena', 'match_id': match_id})
def about(request):
return render(request, 'about.html', {'tab': 'about'})
@login_required
def scoreboard(request):
#bots = Bot.objects.all().order_by('-points')
users = UserProfile.objects.filter(current_bot__isnull=False, user__is_active=True).order_by('-score')
users = ((user, request.user.profile.latest_match_id(user)) for user in users)
challenges = Challenge.objects.filter(requested_by=request.user.profile, challenger_bot=request.user.profile.current_bot, played=False, canceled=False)
# if challenges.count() > 0:
# pending_challenges = True
# else:
# pending_challenges = False
pending_challenged_bots = [c.challenged_bot for c in challenges]
played_challenges = Challenge.objects.filter(requested_by=request.user.profile, played=True, canceled=False)
challenged_bots = [c.challenged_bot for c in played_challenges]
return render(request, 'scoreboard.html', {'tab': 'score',
'users': users,
'challenged_bots': challenged_bots,
'pending_challenged_bots': pending_challenged_bots})
@login_required
def tournament(request):
user_query = UserProfile.objects.filter(current_bot__isnull=False, user__is_active=True,
user__is_superuser=False)
for user in user_query.all():
user.score = user.points
user.save()
users = user_query.order_by('-score')
return render(request, 'tournament.html', {'tab': 'tournament', 'users': users})
@login_required
def mybots(request):
user_prof = UserProfile.objects.get(user=request.user)
if request.method == 'POST':
form = BotBufferForm(request.POST)
if not form.is_valid():
print "ERROR in form!"
return
new_code = form.cleaned_data['code']
user_prof.code = new_code
if 'publish_buffer' in request.POST:
bot = Bot()
bot.owner = user_prof
bot.code = new_code
bot.save()
validate_bot.delay(bot.id, new_code)
user_prof.current_bot = bot
user_prof.save()
return redirect('/mybots')
else:
form = BotBufferForm(instance=user_prof)
return render(request, "my_bots.html", {
'form': form,
'user_prof': user_prof,
'tab': 'mybots',
'my_bots': reversed(Bot.objects.filter(owner=user_prof))
})
@login_required
@csrf_exempt
@require_POST
def challenge(request):
if request.is_ajax():
challenge_bot_id = json.loads(request.body)['msg']
challenge_bot = Bot.objects.get(pk=challenge_bot_id)
# get the user current bot
user_prof = UserProfile.objects.get(user=request.user)
if not user_prof.current_bot:
            print "Cannot challenge if the user does not have a bot!"
return HttpResponse("Error")
if challenge_bot.owner == user_prof:
print "[CHEATING!] - wrong challenge bot!"
return HttpResponse("Error")
        # challenged bot must be the owner's current bot
if not challenge_bot.is_current_bot:
            print "[CHEATING!] - wrong challenge bot! Must be the owner's current bot."
return HttpResponse("Error")
print "Got a challenge for bot: ", challenge_bot
# Get pending challenges for this user
challenges = Challenge.objects.filter(requested_by=user_prof, played=False, canceled=False)
if challenges.count() > 0:
# has pending challenges, must wait.
return HttpResponse("Can not challenge more than one bot at a time")
# Check if these bots haven't already played.
#played_challs = Challenge.objects.filter(challenger_bot=user_prof.current_bot,
# challenged_bot=challenge_bot, played=True)
#if played_challs.count() > 0:
# # has already played against this bot, must upload a new one
# return HttpResponse("Already played against this bot!. Upload a new one.")
if (user_prof.current_bot.valid != Bot.READY
or challenge_bot.valid != Bot.READY):
            return JsonResponse({'success': False, 'msg': 'One of the bots is not READY'})
        new_challenge = Challenge()
        new_challenge.requested_by = user_prof
        new_challenge.challenger_bot = user_prof.current_bot
        new_challenge.challenged_bot = challenge_bot
        new_challenge.save()
return JsonResponse({'success': True})
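# Client contract implied by the view above (inferred from the parsing code,
# not from the original front-end): the page POSTs a JSON body of the form
# {"msg": <bot_id>} and receives {"success": true/false[, "msg": ...]} back.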
@login_required
@cache_page(60)
def main_match(request):
return HttpResponse(None)
@login_required
def my_matches(request):
matches = Challenge.objects.filter(Q(challenger_bot__owner=request.user) |
Q(challenged_bot__owner=request.user)).filter(canceled=False).filter(played=True).order_by('-creation_date').select_related('challenger_bot__owner__user', 'challenged_bot__owner__user', 'winner_bot__owner__user')
return render(request, 'mymatches.html', {'matches': matches, 'tab': 'my-matches'})
@login_required
def get_match(request, match_id):
try:
challenge = Challenge.objects.get(pk=match_id)
if challenge.canceled:
return JsonResponse({'success': False})
else:
return JsonResponse({'success': True, 'data': json.loads(challenge.result)})
except ObjectDoesNotExist:
return JsonResponse({'success': False})
@login_required
def get_bot_status(request, bot_id):
try:
bot = Bot.objects.get(pk=bot_id)
return JsonResponse({'success': True, 'status': bot.valid, 'code': bot.code ,'reason': bot.invalid_reason})
except ObjectDoesNotExist:
return JsonResponse({'success': False})
@login_required
def random_test_match(request):
return HttpResponse(None)
@login_required
def bot_code(request, bot_pk):
if bot_pk == "0":
user_prof = UserProfile.objects.get(user=request.user)
return HttpResponse(user_prof.my_buffer)
bot_code = Bot.objects.get(pk=bot_pk, owner=request.user).code
return HttpResponse(bot_code)
@login_required
@cache_page(10)
def get_playlist(request):
challenges = Challenge.objects.filter(played=True, canceled=False).order_by('-creation_date')[:50]
if not challenges:
return JsonResponse({'success': False, 'data': []})
challs = [ [ch.id, ch.caption()] for ch in challenges ]
return JsonResponse({'success': True, 'data': challs})
| mit |
flotre/sickbeard-vfvo | lib/requests/packages/chardet2/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(GB2312SMModel)
self._mDistributionAnalyzer = GB2312DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "GB2312"
| gpl-3.0 |
flotre/sickbeard-vfvo | cherrypy/_cpchecker.py | 39 | 14290 | import os
import warnings
import cherrypy
class Checker(object):
"""A checker for CherryPy sites and their mounted applications.
on: set this to False to turn off the checker completely.
When this object is called at engine startup, it executes each
of its own methods whose names start with "check_". If you wish
to disable selected checks, simply add a line in your global
config which sets the appropriate method to False:
[global]
checker.check_skipped_app_config = False
You may also dynamically add or replace check_* methods in this way.
"""
on = True
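    # The same switch can be flipped from Python config (sketch; assumes the
    # stock 'checker' namespace registered by cherrypy._cpconfig):
    #   cherrypy.config.update({'checker.check_skipped_app_config': False})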
def __init__(self):
self._populate_known_types()
def __call__(self):
"""Run all check_* methods."""
if self.on:
oldformatwarning = warnings.formatwarning
warnings.formatwarning = self.formatwarning
try:
for name in dir(self):
if name.startswith("check_"):
method = getattr(self, name)
if method and callable(method):
method()
finally:
warnings.formatwarning = oldformatwarning
def formatwarning(self, message, category, filename, lineno, line=None):
"""Function to format a warning."""
return "CherryPy Checker:\n%s\n\n" % message
# This value should be set inside _cpconfig.
global_config_contained_paths = False
def check_app_config_entries_dont_start_with_script_name(self):
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
if sn == '':
continue
sn_atoms = sn.strip("/").split("/")
for key in app.config.keys():
key_atoms = key.strip("/").split("/")
if key_atoms[:len(sn_atoms)] == sn_atoms:
warnings.warn(
"The application mounted at %r has config " \
"entries that start with its script name: %r" % (sn, key))
def check_site_config_entries_in_app_config(self):
for sn, app in cherrypy.tree.apps.iteritems():
if not isinstance(app, cherrypy.Application):
continue
msg = []
for section, entries in app.config.iteritems():
if section.startswith('/'):
for key, value in entries.iteritems():
for n in ("engine.", "server.", "tree.", "checker."):
if key.startswith(n):
msg.append("[%s] %s = %s" % (section, key, value))
if msg:
msg.insert(0,
"The application mounted at %r contains the following "
"config entries, which are only allowed in site-wide "
"config. Move them to a [global] section and pass them "
"to cherrypy.config.update() instead of tree.mount()." % sn)
warnings.warn(os.linesep.join(msg))
def check_skipped_app_config(self):
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
msg = "The Application mounted at %r has an empty config." % sn
if self.global_config_contained_paths:
msg += (" It looks like the config you passed to "
"cherrypy.config.update() contains application-"
"specific sections. You must explicitly pass "
"application config via "
"cherrypy.tree.mount(..., config=app_config)")
warnings.warn(msg)
return
def check_app_config_brackets(self):
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
for key in app.config.keys():
if key.startswith("[") or key.endswith("]"):
warnings.warn(
"The application mounted at %r has config " \
"section names with extraneous brackets: %r. "
"Config *files* need brackets; config *dicts* "
"(e.g. passed to tree.mount) do not." % (sn, key))
def check_static_paths(self):
# Use the dummy Request object in the main thread.
request = cherrypy.request
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
request.app = app
for section in app.config:
# get_resource will populate request.config
request.get_resource(section + "/dummy.html")
conf = request.config.get
if conf("tools.staticdir.on", False):
msg = ""
root = conf("tools.staticdir.root")
dir = conf("tools.staticdir.dir")
if dir is None:
msg = "tools.staticdir.dir is not set."
else:
fulldir = ""
if os.path.isabs(dir):
fulldir = dir
if root:
msg = ("dir is an absolute path, even "
"though a root is provided.")
testdir = os.path.join(root, dir[1:])
if os.path.exists(testdir):
msg += ("\nIf you meant to serve the "
"filesystem folder at %r, remove "
"the leading slash from dir." % testdir)
else:
if not root:
msg = "dir is a relative path and no root provided."
else:
fulldir = os.path.join(root, dir)
if not os.path.isabs(fulldir):
msg = "%r is not an absolute path." % fulldir
if fulldir and not os.path.exists(fulldir):
if msg:
msg += "\n"
msg += ("%r (root + dir) is not an existing "
"filesystem path." % fulldir)
if msg:
warnings.warn("%s\nsection: [%s]\nroot: %r\ndir: %r"
% (msg, section, root, dir))
# -------------------------- Compatibility -------------------------- #
obsolete = {
'server.default_content_type': 'tools.response_headers.headers',
'log_access_file': 'log.access_file',
'log_config_options': None,
'log_file': 'log.error_file',
'log_file_not_found': None,
'log_request_headers': 'tools.log_headers.on',
'log_to_screen': 'log.screen',
'show_tracebacks': 'request.show_tracebacks',
'throw_errors': 'request.throw_errors',
'profiler.on': ('cherrypy.tree.mount(profiler.make_app('
'cherrypy.Application(Root())))'),
}
deprecated = {}
def _compat(self, config):
"""Process config and warn on each obsolete or deprecated entry."""
for section, conf in config.items():
if isinstance(conf, dict):
for k, v in conf.items():
if k in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead.\n"
"section: [%s]" %
(k, self.obsolete[k], section))
elif k in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead.\n"
"section: [%s]" %
(k, self.deprecated[k], section))
else:
if section in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead."
% (section, self.obsolete[section]))
elif section in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead."
% (section, self.deprecated[section]))
def check_compatibility(self):
"""Process config and warn on each obsolete or deprecated entry."""
self._compat(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._compat(app.config)
# ------------------------ Known Namespaces ------------------------ #
extra_config_namespaces = []
def _known_ns(self, app):
ns = ["wsgi"]
ns.extend(app.toolboxes.keys())
ns.extend(app.namespaces.keys())
ns.extend(app.request_class.namespaces.keys())
ns.extend(cherrypy.config.namespaces.keys())
ns += self.extra_config_namespaces
for section, conf in app.config.items():
is_path_section = section.startswith("/")
if is_path_section and isinstance(conf, dict):
for k, v in conf.items():
atoms = k.split(".")
if len(atoms) > 1:
if atoms[0] not in ns:
# Spit out a special warning if a known
# namespace is preceded by "cherrypy."
if (atoms[0] == "cherrypy" and atoms[1] in ns):
msg = ("The config entry %r is invalid; "
"try %r instead.\nsection: [%s]"
% (k, ".".join(atoms[1:]), section))
else:
msg = ("The config entry %r is invalid, because "
"the %r config namespace is unknown.\n"
"section: [%s]" % (k, atoms[0], section))
warnings.warn(msg)
elif atoms[0] == "tools":
if atoms[1] not in dir(cherrypy.tools):
msg = ("The config entry %r may be invalid, "
"because the %r tool was not found.\n"
"section: [%s]" % (k, atoms[1], section))
warnings.warn(msg)
def check_config_namespaces(self):
"""Process config and warn on each unknown config namespace."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_ns(app)
# -------------------------- Config Types -------------------------- #
known_config_types = {}
def _populate_known_types(self):
import __builtin__ as builtins
b = [x for x in vars(builtins).values()
if type(x) is type(str)]
def traverse(obj, namespace):
for name in dir(obj):
# Hack for 3.2's warning about body_params
if name == 'body_params':
continue
vtype = type(getattr(obj, name, None))
if vtype in b:
self.known_config_types[namespace + "." + name] = vtype
traverse(cherrypy.request, "request")
traverse(cherrypy.response, "response")
traverse(cherrypy.server, "server")
traverse(cherrypy.engine, "engine")
traverse(cherrypy.log, "log")
def _known_types(self, config):
msg = ("The config entry %r in section %r is of type %r, "
"which does not match the expected type %r.")
for section, conf in config.items():
if isinstance(conf, dict):
for k, v in conf.items():
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
else:
k, v = section, conf
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
def check_config_types(self):
"""Assert that config values are of the same type as default values."""
self._known_types(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_types(app.config)
# -------------------- Specific config warnings -------------------- #
def check_localhost(self):
"""Warn if any socket_host is 'localhost'. See #711."""
for k, v in cherrypy.config.items():
if k == 'server.socket_host' and v == 'localhost':
warnings.warn("The use of 'localhost' as a socket host can "
"cause problems on newer systems, since 'localhost' can "
"map to either an IPv4 or an IPv6 address. You should "
"use '127.0.0.1' or '[::1]' instead.")
| gpl-3.0 |
GrognardsFromHell/TemplePlus | tpdatasrc/co8infra/scr/Spell741 - Ice Breath Weapon.py | 1 | 1381 | from toee import *
def OnBeginSpellCast( spell ):
print "Frozen Breath OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
game.particles( "sp-evocation-conjure", spell.caster )
def OnSpellEffect ( spell ):
print "Frozen Breath OnSpellEffect"
remove_list = []
dam = dice_new( '1d6' )
dam.number = spell.spell_level
if dam.number > 6:
dam.number = 6
game.particles( 'sp-Cone of Cold', spell.caster )
npc = spell.caster
spell.dc = spell.dc + 5
if npc.name == 14999: ## Old White Dragon
dam.number = 8
spell.dc = 27
# range = 25 + 5 * int(spell.caster_level/2)
range = 60
target_list = list(game.obj_list_cone( spell.caster, OLC_CRITTERS, range, -30, 60 ))
target_list.remove(spell.caster)
for obj in target_list:
if obj.reflex_save_and_damage( spell.caster, spell.dc,
D20_Save_Reduction_Half, D20STD_F_NONE, dam, D20DT_COLD, D20DAP_UNSPECIFIED,
D20A_CAST_SPELL, spell.id ) > 0:
# saving throw successful
obj.float_mesfile_line( 'mes\\spell.mes', 30001 )
else:
# saving throw unsuccessful
obj.float_mesfile_line( 'mes\\spell.mes', 30002 )
spell.target_list.remove_list( remove_list )
spell.spell_end(spell.id)
def OnBeginRound( spell ):
print "Frozen Breath OnBeginRound"
def OnEndSpellCast( spell ):
print "Frozen Breath OnEndSpellCast"
| mit |
Nowheresly/odoo | addons/delivery/stock.py | 38 | 10914 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
# Overloaded stock_picking to manage carriers :
class stock_picking(osv.osv):
_inherit = 'stock.picking'
def _cal_weight(self, cr, uid, ids, name, args, context=None):
res = {}
for picking in self.browse(cr, uid, ids, context=context):
total_weight = total_weight_net = 0.00
for move in picking.move_lines:
if move.state != 'cancel':
total_weight += move.weight
total_weight_net += move.weight_net
res[picking.id] = {
'weight': total_weight,
'weight_net': total_weight_net,
}
return res
def _get_picking_line(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
result[line.picking_id.id] = True
return result.keys()
_columns = {
'carrier_id':fields.many2one("delivery.carrier","Carrier"),
'volume': fields.float('Volume', copy=False),
'weight': fields.function(_cal_weight, type='float', string='Weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_weight',
store={
'stock.picking': (lambda self, cr, uid, ids, c={}: ids, ['move_lines'], 40),
'stock.move': (_get_picking_line, ['state', 'picking_id', 'product_id','product_uom_qty','product_uom'], 40),
}),
'weight_net': fields.function(_cal_weight, type='float', string='Net Weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_weight',
store={
'stock.picking': (lambda self, cr, uid, ids, c={}: ids, ['move_lines'], 40),
'stock.move': (_get_picking_line, ['state', 'picking_id', 'product_id','product_uom_qty','product_uom'], 40),
}),
'carrier_tracking_ref': fields.char('Carrier Tracking Ref', copy=False),
'number_of_packages': fields.integer('Number of Packages', copy=False),
'weight_uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True,readonly="1",help="Unit of measurement for Weight",),
}
def _prepare_shipping_invoice_line(self, cr, uid, picking, invoice, context=None):
"""Prepare the invoice line to add to the shipping costs to the shipping's
invoice.
:param browse_record picking: the stock picking being invoiced
:param browse_record invoice: the stock picking's invoice
:return: dict containing the values to create the invoice line,
or None to create nothing
"""
if picking.sale_id:
delivery_line = picking.sale_id.order_line.filtered(lambda l: l.is_delivery and l.invoiced)
if delivery_line:
return None
carrier_obj = self.pool.get('delivery.carrier')
grid_obj = self.pool.get('delivery.grid')
currency_obj = self.pool.get('res.currency')
if not picking.carrier_id or \
any(inv_line.product_id.id == picking.carrier_id.product_id.id
for inv_line in invoice.invoice_line):
return None
grid_id = carrier_obj.grid_get(cr, uid, [picking.carrier_id.id],
picking.partner_id.id, context=context)
if not grid_id:
raise osv.except_osv(_('Warning!'),
_('The carrier %s (id: %d) has no delivery grid!') \
% (picking.carrier_id.name,
picking.carrier_id.id))
quantity = sum([line.product_uom_qty for line in picking.move_lines])
price = grid_obj.get_price_from_picking(cr, uid, grid_id,
invoice.amount_untaxed, picking.weight, picking.volume,
quantity, context=context)
if invoice.company_id.currency_id.id != invoice.currency_id.id:
price = currency_obj.compute(cr, uid, invoice.company_id.currency_id.id, invoice.currency_id.id,
price, context=dict(context or {}, date=invoice.date_invoice))
account_id = picking.carrier_id.product_id.property_account_income.id
if not account_id:
account_id = picking.carrier_id.product_id.categ_id\
.property_account_income_categ.id
taxes = picking.carrier_id.product_id.taxes_id
partner = picking.partner_id or False
fp = invoice.fiscal_position or partner.property_account_position
if partner:
account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fp, account_id)
taxes_ids = self.pool.get('account.fiscal.position').map_tax(cr, uid, fp, taxes, context=context)
else:
taxes_ids = [x.id for x in taxes]
return {
'name': picking.carrier_id.name,
'invoice_id': invoice.id,
'uos_id': picking.carrier_id.product_id.uos_id.id,
'product_id': picking.carrier_id.product_id.id,
'account_id': account_id,
'price_unit': price,
'quantity': 1,
'invoice_line_tax_id': [(6, 0, taxes_ids)],
}
def _invoice_create_line(self, cr, uid, moves, journal_id, inv_type='out_invoice', context=None):
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
invoice_ids = super(stock_picking, self)._invoice_create_line(cr, uid, moves, journal_id, inv_type=inv_type, context=context)
delivey_invoices = {}
for move in moves:
for invoice in move.picking_id.sale_id.invoice_ids:
if invoice.id in invoice_ids:
delivey_invoices[invoice] = move.picking_id
if delivey_invoices:
for invoice, picking in delivey_invoices.items():
invoice_line = self._prepare_shipping_invoice_line(cr, uid, picking, invoice, context=context)
if invoice_line:
invoice_line_obj.create(cr, uid, invoice_line)
invoice_obj.button_compute(cr, uid, [invoice.id], context=context, set_total=(inv_type in ('in_invoice', 'in_refund')))
return invoice_ids
def _get_default_uom(self, cr, uid, context=None):
uom_categ_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'product.product_uom_categ_kgm')
return self.pool.get('product.uom').search(cr, uid, [('category_id', '=', uom_categ_id), ('factor', '=', 1)])[0]
_defaults = {
'weight_uom_id': lambda self, cr, uid, c: self._get_default_uom(cr, uid, c),
}
class stock_move(osv.osv):
_inherit = 'stock.move'
def _cal_move_weight(self, cr, uid, ids, name, args, context=None):
res = {}
uom_obj = self.pool.get('product.uom')
for move in self.browse(cr, uid, ids, context=context):
weight = weight_net = 0.00
if move.product_id.weight > 0.00:
converted_qty = move.product_qty
weight = (converted_qty * move.product_id.weight)
if move.product_id.weight_net > 0.00:
weight_net = (converted_qty * move.product_id.weight_net)
res[move.id] = {
'weight': weight,
'weight_net': weight_net,
}
return res
_columns = {
'weight': fields.function(_cal_move_weight, type='float', string='Weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_move_weight',
store={
'stock.move': (lambda self, cr, uid, ids, c=None: ids, ['product_id', 'product_uom_qty', 'product_uom'], 30),
}),
'weight_net': fields.function(_cal_move_weight, type='float', string='Net weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_move_weight',
store={
'stock.move': (lambda self, cr, uid, ids, c=None: ids, ['product_id', 'product_uom_qty', 'product_uom'], 30),
}),
        'weight_uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True, readonly="1", help="Unit of Measure used for the weight",),
}
def action_confirm(self, cr, uid, ids, context=None):
"""
Pass the carrier to the picking from the sales order
(Should also work in case of Phantom BoMs when on explosion the original move is deleted)
"""
procs_to_check = []
for move in self.browse(cr, uid, ids, context=context):
if move.procurement_id and move.procurement_id.sale_line_id and move.procurement_id.sale_line_id.order_id.carrier_id:
procs_to_check += [move.procurement_id]
res = super(stock_move, self).action_confirm(cr, uid, ids, context=context)
pick_obj = self.pool.get("stock.picking")
for proc in procs_to_check:
pickings = list(set([x.picking_id.id for x in proc.move_ids if x.picking_id and not x.picking_id.carrier_id]))
if pickings:
pick_obj.write(cr, uid, pickings, {'carrier_id': proc.sale_line_id.order_id.carrier_id.id}, context=context)
return res
def _get_default_uom(self, cr, uid, context=None):
uom_categ_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'product.product_uom_categ_kgm')
        return self.pool.get('product.uom').search(cr, uid, [('category_id', '=', uom_categ_id), ('factor', '=', 1)])[0]
_defaults = {
'weight_uom_id': lambda self, cr, uid, c: self._get_default_uom(cr, uid, c),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fboers/jumeg | examples/do_MLICA.py | 1 | 5891 | """
Compute ICA object based on filtered and downsampled data.
Identify ECG and EOG artifacts using MLICA and compare
results to correlation & ctps analysis.
Apply ICA object to filtered and unfiltered data.
Ahmad Hasasneh, Nikolas Kampel, Praveen Sripad, N. Jon Shah, and Juergen Dammers
"Deep Learning Approach for Automatic Classification of Ocular and Cardiac
Artifacts in MEG Data"
Journal of Engineering, vol. 2018, Article ID 1350692,10 pages, 2018.
https://doi.org/10.1155/2018/1350692
"""
import os.path as op
import matplotlib.pylab as plt
plt.ion()
import numpy as np
import mne
from jumeg.decompose.ica_replace_mean_std import ICA, ica_update_mean_std
from keras.models import load_model
from jumeg.jumeg_noise_reducer import noise_reducer
from jumeg.jumeg_preprocessing import get_ics_cardiac, get_ics_ocular
from jumeg.jumeg_plot import plot_performance_artifact_rejection
from jumeg.jumeg_utils import get_jumeg_path
# config
MLICA_threshold = 0.8
n_components = 60
njobs = 4 # for downsampling
tmin = 0
tmax = tmin + 15000
flow_ecg, fhigh_ecg = 8, 20
flow_eog, fhigh_eog = 1, 20
ecg_thresh, eog_thresh = 0.3, 0.3
ecg_ch = 'ECG 001'
eog1_ch = 'EOG 001'
eog2_ch = 'EOG 002'
reject = {'mag': 5e-12}
refnotch = [50., 100., 150., 200., 250., 300., 350., 400.]
data_path = op.join(get_jumeg_path(), 'data')
print(data_path)
# example filename
raw_fname = "/Volumes/megraid21/sripad/cau_fif_data/jumeg_test_data/" \
"109925_CAU01A_100715_0842_2_c,rfDC-raw.fif"
# load the model for artifact rejection
# the details of the model are provided in x_validation_shuffle_v4_split_23.txt
model_name = op.join(data_path, "dcnn_model.hdf5")
model = load_model(model_name)
# noise reducer
raw_nr = noise_reducer(raw_fname, reflp=5., return_raw=True)
raw_nr = noise_reducer(raw_fname, raw=raw_nr, refhp=0.1, noiseref=['RFG ...'],
return_raw=True)
# 50HZ and 60HZ notch filter to remove noise
raw = noise_reducer(raw_fname, raw=raw_nr, refnotch=refnotch, return_raw=True)
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
raw_filtered = raw.copy().filter(0., 45., picks=picks, filter_length='auto',
l_trans_bandwidth='auto', h_trans_bandwidth='auto',
n_jobs=njobs, method='fir', phase='zero',
fir_window='hamming')
# downsample the data to 250 Hz, necessary for the model
raw_ds = raw_filtered.copy().resample(250, npad='auto', window='boxcar', stim_picks=None,
n_jobs=njobs, events=None)
raw_ds_chop = raw_ds.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000) # downsampled raw
raw_filtered_chop = raw_filtered.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000)
raw_chop = raw.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000)
ica = ICA(method='fastica', n_components=n_components, random_state=42,
max_pca_components=None, max_iter=5000, verbose=None)
# do the ICA decomposition on downsampled raw
ica.fit(raw_ds_chop, picks=picks, reject=reject, verbose=None)
sources = ica.get_sources(raw_ds_chop)._data
# extract temporal and spatial components
mm = np.float32(np.dot(ica.mixing_matrix_[:, :].T,
ica.pca_components_[:ica.n_components_]))
# use [:, :15000] to make sure it's 15000 data points
chop = sources[:, :15000]
chop_reshaped = np.reshape(chop, (len(chop), len(chop[0]), 1))
model_scores = model.predict([mm, chop_reshaped], verbose=1)
bads_MLICA = []
# print model_scores
for idx in range(0, len(model_scores)):
if model_scores[idx][0] > MLICA_threshold:
bads_MLICA.append(idx)
# visualisation
# ica.exclude = bads_MLICA
# ica.plot_sources(raw_ds_chop, block=True)
# compare MLICA to results from correlation and ctps analysis
ica.exclude = []
print('Identifying components..')
# get ECG/EOG related components using JuMEG
ic_ecg = get_ics_cardiac(raw_filtered_chop, ica, flow=flow_ecg, fhigh=fhigh_ecg,
thresh=ecg_thresh, tmin=-0.5, tmax=0.5,
name_ecg=ecg_ch, use_CTPS=True)[0] # returns both ICs and scores (take only ICs)
ic_eog = get_ics_ocular(raw_filtered_chop, ica, flow=flow_eog, fhigh=fhigh_eog,
thresh=eog_thresh, name_eog_hor=eog1_ch,
name_eog_ver=eog2_ch, score_func='pearsonr')
bads_corr_ctps = list(ic_ecg) + list(ic_eog)
bads_corr_ctps = list(set(bads_corr_ctps)) # remove potential duplicates
bads_corr_ctps.sort()
# visualisation
# ica.exclude = bads_corr_ctps
# ica.plot_sources(raw_chop, block=True)
print('Bad components from MLICA:', bads_MLICA)
print('Bad components from correlation & ctps:', bads_corr_ctps)
# apply MLICA result to filtered and unfiltered data
# exclude bad components identified by MLICA
ica.exclude = bads_MLICA
fnout_fig = '109925_CAU01A_100715_0842_2_c,rfDC,0-45hz,ar-perf'
ica_filtered_chop = ica_update_mean_std(raw_filtered_chop, ica, picks=picks, reject=reject)
raw_filtered_chop_clean = ica_filtered_chop.apply(raw_filtered_chop, exclude=ica.exclude,
n_pca_components=None)
ica_unfiltered_chop = ica_update_mean_std(raw_chop, ica, picks=picks, reject=reject)
raw_unfiltered_chop_clean = ica_unfiltered_chop.apply(raw_chop, exclude=ica.exclude, n_pca_components=None)
# create copy of original data since apply_ica_replace_mean_std changes the input data in place (raw and ica)
raw_copy = raw.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000)
plot_performance_artifact_rejection(raw_copy, ica_unfiltered_chop, fnout_fig,
meg_clean=raw_unfiltered_chop_clean,
show=False, verbose=False,
name_ecg=ecg_ch,
name_eog=eog2_ch)
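# Optionally persist the cleaned, unfiltered data for later analysis
# (hypothetical output filename; mne's Raw.save writes .fif files):
# raw_unfiltered_chop_clean.save('109925_CAU01A_100715_0842_2_c,rfDC,ar-raw.fif',
#                                overwrite=True)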
| bsd-3-clause |
bfaviero/ok | nodisk.py | 1 | 2852 | import os
import subprocess
H_NAME = 'oauth-kerberos-server'
H_FOLDER = os.path.join('/cgroup', H_NAME)
CGROUP_NAME = 'thegroup'
CGROUP_FOLDER = os.path.join(H_FOLDER, CGROUP_NAME)
MOUNT_CMD_PATH = '/bin/mount'
UMOUNT_CMD_PATH = '/bin/umount'
MOUNTPOINT_CMD_PATH = '/bin/mountpoint'
def prevent_swapping():
"""prevents the calling process (and any children spawned after
calling) from being swapped out in whole or in part
This is done by creating a Linux cgroup which the calling process is
added to, then setting the memory.swappiness value for the cgroup to 0.
    According to the cgroup documentation, this accomplishes the desired
    effect.
    The calling process must be root (have euid 0), but it is fine if the
    process drops privileges after calling this."""
if os.geteuid() != 0:
raise Exception("you must have effective uid 0 to run this")
# setup cgroup folders if they don't already exist
makedirs(H_FOLDER, 0o700, NO_ERROR_IF_EXISTING) # only root
# mount cgroup heierarchy, if it isn't already mounted
    if mountpoint(H_FOLDER) != 0:
code = mount('-t', 'cgroup', '-o', 'memory', H_NAME, H_FOLDER)
if code != 0:
raise Exception("unable to create cgroup using mount")
# make the cgroup if it doesn't exist
makedirs(CGROUP_FOLDER, 0o700, NO_ERROR_IF_EXISTING)
# set memory.swappiiness to 0 for the cgroup
f = open(os.path.join(CGROUP_FOLDER, 'memory.swappiness'), 'w')
f.write('0')
    f.close()  # we don't need the file anymore, plus we want the write to be flushed
# add our pid to the cgroup
f = open(os.path.join(CGROUP_FOLDER, 'tasks'), 'w')
f.write(str(os.getpid()))
    f.close()  # we don't need the file anymore, plus we want the write to be flushed
ERROR_IF_EXISTING = 0 # raise an error if leaf exists
NO_ERROR_IF_EXISTING = 1 # don't raise an error if leaf exists
def makedirs(path, mode=0o777, behavior=ERROR_IF_EXISTING):
"""this does the same thing as os.makedirs, but offers the option to
change the behavior in the event that the leaf directory to be created
already exists"""
try:
os.makedirs(path, mode)
except OSError as e:
# If we encountered error because file exists, everything is
# fine. Otherwise, re-throw the exception
if e.errno != 17 or behavior==ERROR_IF_EXISTING:
raise e
def mount(*argv):
"""calls the mount command with the given arguments, returning whatever
the mount command returns"""
return subprocess.call([MOUNT_CMD_PATH] + list(argv))
def umount(*argv):
"""calls the umount command with the given arguments, returning whatever
the mount command returns"""
return subprocess.call([UMOUNT_CMD_PATH] + list(argv))
def mountpoint(dirname):
"""calls the mountpoint comand with the -q (quiet) argument followed by the dirname
argument, returning whatever the command returns"""
return subprocess.call([MOUNTPOINT_CMD_PATH, '-q', dirname])
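

if __name__ == '__main__':
    # Minimal usage sketch (must run as root; uses the default module-level
    # cgroup paths defined above): pin this process's memory, then continue.
    # Dropping privileges afterwards, e.g. via os.setuid(), is a hypothetical
    # next step not shown here.
    prevent_swapping()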
| mit |
explosion/thinc | thinc/tests/layers/test_combinators.py | 1 | 7655 | import pytest
import numpy
from numpy.testing import assert_allclose
from thinc.api import clone, concatenate, noop, add, map_list
from thinc.api import Linear, Dropout, Model, NumpyOps
from thinc.layers import chain, tuplify
@pytest.fixture(params=[1, 2, 9])
def nB(request):
return request.param
@pytest.fixture(params=[1, 6])
def nI(request):
return request.param
@pytest.fixture(params=[1, 5, 3])
def nH(request):
return request.param
@pytest.fixture(params=[1, 2, 7, 9])
def nO(request):
return request.param
@pytest.fixture
def model1(nH, nI):
return Linear(nH, nI)
@pytest.fixture
def model2(nO, nH):
return Linear(nO, nH)
@pytest.fixture
def model3(nO):
return Linear(nO, nO)
def test_tuplify_zero():
with pytest.raises(TypeError):
tuplify()
def test_tuplify_one(model1):
with pytest.raises(TypeError):
tuplify(model1)
def test_tuplify_two(model1, model2):
model = tuplify(model1, model2)
assert len(model.layers) == 2
def test_tuplify_operator_two(model1, model2):
with Model.define_operators({"&": tuplify}):
model = model1 & model2
assert len(model.layers) == 2
def test_tuplify_duplicates_input():
model = tuplify(noop(), noop())
ones = numpy.ones([10])
out = model.predict(ones)
assert out == (ones, ones)
def test_tuplify_three(model1, model2, model3):
model = tuplify(model1, model2, model3)
assert len(model.layers) == 3
def test_tuplify_operator_three(model1, model2, model3):
# Previously we 'flattened' these nested calls. We might opt to do so
# again, especially for the operators.
with Model.define_operators({"&": tuplify}):
model = model1 & model2 & model3
assert len(model.layers) == 2
assert len(model.layers[0].layers) == 2
def test_chain_zero():
with pytest.raises(TypeError):
chain()
def test_chain_one(model1):
with pytest.raises(TypeError):
chain(model1)
def test_chain_two(model1, model2):
model = chain(model1, model2)
assert len(model.layers) == 2
def test_chain_operator_two(model1, model2):
with Model.define_operators({">>": chain}):
model = model1 >> model2
assert len(model.layers) == 2
def test_chain_three(model1, model2, model3):
model = chain(model1, model2, model3)
assert len(model.layers) == 3
def test_chain_operator_three(model1, model2, model3):
# Previously we 'flattened' these nested calls. We might opt to do so
# again, especially for the operators.
with Model.define_operators({">>": chain}):
model = model1 >> model2 >> model3
assert len(model.layers) == 2
assert len(model.layers[0].layers) == 2
def test_chain_right_branch(model1, model2, model3):
# Previously we 'flattened' these nested calls. We might opt to do so
# again, especially for the operators.
merge1 = chain(model1, model2)
merge2 = chain(merge1, model3)
assert len(merge1.layers) == 2
assert len(merge2.layers) == 2
@pytest.mark.parametrize("ops", [NumpyOps(), NumpyOps(use_blis=True)])
def test_chain(ops):
data = numpy.asarray([[1, 2, 3, 4]], dtype="f")
model = chain(Linear(1), Dropout(), Linear(1))
model.ops = ops
model.initialize(data, data)
Y, backprop = model(data, is_train=True)
backprop(Y)
# Layers with and without nO/nI
model = chain(Linear(1), Dropout(), Linear(1, 1))
model.initialize(data, data)
# Setting dim on model
model = chain(Linear(1), Dropout(), Linear(1))
model.set_dim("nO", 1)
model.initialize(data, None)
model = chain(Linear(1, 1), Dropout(), Linear(1, 1))
model.set_dim("nI", 1)
model.initialize(None, data)
# Not enough arguments
with pytest.raises(TypeError):
chain(Linear())
with pytest.raises(TypeError):
chain()
def test_concatenate_one(model1):
model = concatenate(model1)
assert isinstance(model, Model)
def test_concatenate_two(model1, model2):
model = concatenate(model1, model2)
assert len(model.layers) == 2
def test_concatenate_operator_two(model1, model2):
with Model.define_operators({"|": concatenate}):
model = model1 | model2
assert len(model.layers) == 2
def test_concatenate_three(model1, model2, model3):
model = concatenate(model1, model2, model3)
assert len(model.layers) == 3
def test_concatenate_operator_three(model1, model2, model3):
with Model.define_operators({"|": concatenate}):
model = model1 | model2 | model3
assert len(model.layers) == 3
def test_clone_changes_predictions(nH, nI):
model1 = Linear(nH)
model = clone(model1, 10)
ones = numpy.ones((10, nI), dtype="f")
model.initialize(X=ones)
output_from_cloned = model.predict(ones)
output_from_orig = model1.predict(ones)
assert output_from_cloned.sum() != output_from_orig.sum()
def test_clone_gives_distinct_ids(nH, nI):
model = clone(Linear(nH), 5)
assert len(model.layers) == 5
seen_ids = set()
for node in model.walk():
assert node.id not in seen_ids
seen_ids.add(node.id)
    assert len(seen_ids) == 6  # 5 cloned layers plus the wrapping model itself
def test_clone_noop():
model = clone(Linear(), 0)
assert len(model.layers) == 0
assert model.name == "noop"
def test_concatenate_noop():
model = concatenate()
assert len(model.layers) == 0
assert model.name == "noop"
def test_noop():
data = numpy.asarray([1, 2, 3], dtype="f")
model = noop(Linear(), Linear())
model.initialize(data, data)
Y, backprop = model(data, is_train=True)
assert numpy.array_equal(Y, data)
dX = backprop(Y)
assert numpy.array_equal(dX, data)
def test_add():
data = numpy.asarray([[1, 2, 3, 4]], dtype="f")
model = add(Linear(), Linear())
model.initialize(data, data)
Y, backprop = model(data, is_train=True)
Y2 = sum(layer.predict(data) for layer in model.layers)
assert numpy.array_equal(Y, Y2)
dX = backprop(Y)
assert dX.shape == data.shape
# Test that nesting works
model2 = add(model, Linear())
assert len(model2.layers) == 3
model.initialize(data, data)
Y = model2.predict(data)
Y2 = sum(layer.predict(data) for layer in model2.layers)
assert numpy.array_equal(Y, Y2)
def test_add_edge_cases():
data = numpy.asarray([[1, 2, 3, 4]], dtype="f")
with pytest.raises(TypeError):
add()
model = add(Linear(), Linear())
model._layers = []
Y, backprop = model(data, is_train=True)
assert numpy.array_equal(data, Y)
dX = backprop(Y)
assert numpy.array_equal(dX, data)
def test_concatenate():
data = numpy.asarray([[1, 2, 3], [4, 5, 6]], dtype="f")
model = concatenate(Linear(), Linear())
model.initialize(data, data)
Y, backprop = model(data, is_train=True)
assert Y.shape[1] == sum([layer.predict(data).shape[1] for layer in model.layers])
dX = backprop(Y)
assert dX.shape == data.shape
def test_map_list():
nI = 4
nO = 9
Xs = [
numpy.zeros((6, nI), dtype="f"),
numpy.ones((3, nI), dtype="f")
]
Y_shapes = [(x.shape[0], nO) for x in Xs]
model = map_list(Linear())
model.initialize(X=Xs, Y=[numpy.zeros(shape, dtype="f") for shape in Y_shapes])
Ys, backprop = model(Xs, is_train=True)
assert isinstance(Ys, list)
assert len(Ys) == len(Xs)
layer = model.layers[0]
for X, Y in zip(Xs, Ys):
assert_allclose(layer.predict(X), Y)
dXs = backprop(Ys)
assert isinstance(dXs, list)
assert len(dXs) == len(Xs)
assert dXs[0].shape == Xs[0].shape
assert dXs[1].shape == Xs[1].shape
| mit |
tersmitten/ansible | lib/ansible/modules/cloud/vmware/vmware_tag.py | 7 | 8097 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_tag
short_description: Manage VMware tags
description:
- This module can be used to create / delete / update VMware tags.
- The tag feature was introduced in vSphere 6, so this module is not supported on earlier versions of vSphere.
- All variables and VMware object names are case sensitive.
version_added: '2.6'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
- vSphere Automation SDK
options:
tag_name:
description:
- The name of tag to manage.
required: True
tag_description:
description:
- The tag description.
- This is required only if C(state) is set to C(present).
- This parameter is ignored, when C(state) is set to C(absent).
- Process of updating tag only allows description change.
required: False
default: ''
category_id:
description:
        - The unique ID of the category, as generated by vCenter, to which the tag belongs.
        - Users can get this unique ID from the facts module.
required: False
state:
description:
- The state of tag.
        - If set to C(present) and tag does not exist, then tag is created.
- If set to C(present) and tag exists, then tag is updated.
- If set to C(absent) and tag exists, then tag is deleted.
        - If set to C(absent) and tag does not exist, no action is taken.
required: False
default: 'present'
choices: [ 'present', 'absent' ]
extends_documentation_fragment: vmware_rest_client.documentation
'''
EXAMPLES = r'''
- name: Create a tag
vmware_tag:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
validate_certs: no
category_id: 'urn:vmomi:InventoryServiceCategory:e785088d-6981-4b1c-9fb8-1100c3e1f742:GLOBAL'
tag_name: Sample_Tag_0002
tag_description: Sample Description
state: present
delegate_to: localhost
- name: Update tag description
vmware_tag:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
tag_name: Sample_Tag_0002
tag_description: Some fancy description
state: present
delegate_to: localhost
- name: Delete tag
vmware_tag:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
tag_name: Sample_Tag_0002
state: absent
delegate_to: localhost
'''
RETURN = r'''
results:
description: dictionary of tag metadata
returned: on success
type: dict
sample: {
"msg": "Tag 'Sample_Tag_0002' created.",
"tag_id": "urn:vmomi:InventoryServiceTag:bff91819-f529-43c9-80ca-1c9dfda09441:GLOBAL"
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware_rest_client import VmwareRestClient
class VmwareTag(VmwareRestClient):
def __init__(self, module):
super(VmwareTag, self).__init__(module)
self.global_tags = dict()
# api_client to call APIs instead of individual service
self.tag_service = self.api_client.tagging.Tag
self.tag_name = self.params.get('tag_name')
self.get_all_tags()
self.category_service = self.api_client.tagging.Category
def ensure_state(self):
"""
Manage internal states of tags
"""
desired_state = self.params.get('state')
states = {
'present': {
'present': self.state_update_tag,
'absent': self.state_create_tag,
},
'absent': {
'present': self.state_delete_tag,
'absent': self.state_unchanged,
}
}
states[desired_state][self.check_tag_status()]()
def state_create_tag(self):
"""
Create tag
"""
tag_spec = self.tag_service.CreateSpec()
tag_spec.name = self.tag_name
tag_spec.description = self.params.get('tag_description')
category_id = self.params.get('category_id', None)
if category_id is None:
self.module.fail_json(msg="'category_id' is required parameter while creating tag.")
category_found = False
for category in self.category_service.list():
category_obj = self.category_service.get(category)
if category_id == category_obj.id:
category_found = True
break
if not category_found:
self.module.fail_json(msg="Unable to find category specified using 'category_id' - %s" % category_id)
tag_spec.category_id = category_id
tag_id = self.tag_service.create(tag_spec)
if tag_id:
self.module.exit_json(changed=True,
results=dict(msg="Tag '%s' created." % tag_spec.name,
tag_id=tag_id))
self.module.exit_json(changed=False,
results=dict(msg="No tag created", tag_id=''))
def state_unchanged(self):
"""
Return unchanged state
"""
self.module.exit_json(changed=False)
def state_update_tag(self):
"""
Update tag
"""
changed = False
tag_id = self.global_tags[self.tag_name]['tag_id']
results = dict(msg="Tag %s is unchanged." % self.tag_name,
tag_id=tag_id)
tag_update_spec = self.tag_service.UpdateSpec()
tag_desc = self.global_tags[self.tag_name]['tag_description']
desired_tag_desc = self.params.get('tag_description')
if tag_desc != desired_tag_desc:
tag_update_spec.description = desired_tag_desc
self.tag_service.update(tag_id, tag_update_spec)
results['msg'] = 'Tag %s updated.' % self.tag_name
changed = True
self.module.exit_json(changed=changed, results=results)
def state_delete_tag(self):
"""
Delete tag
"""
tag_id = self.global_tags[self.tag_name]['tag_id']
self.tag_service.delete(tag_id=tag_id)
self.module.exit_json(changed=True,
results=dict(msg="Tag '%s' deleted." % self.tag_name,
tag_id=tag_id))
def check_tag_status(self):
"""
Check if tag exists or not
Returns: 'present' if tag found, else 'absent'
"""
ret = 'present' if self.tag_name in self.global_tags else 'absent'
return ret
def get_all_tags(self):
"""
Retrieve all tag information
"""
for tag in self.tag_service.list():
tag_obj = self.tag_service.get(tag)
self.global_tags[tag_obj.name] = dict(tag_description=tag_obj.description,
tag_used_by=tag_obj.used_by,
tag_category_id=tag_obj.category_id,
tag_id=tag_obj.id
)
def main():
argument_spec = VmwareRestClient.vmware_client_argument_spec()
argument_spec.update(
tag_name=dict(type='str', required=True),
tag_description=dict(type='str', default='', required=False),
category_id=dict(type='str', required=False),
state=dict(type='str', choices=['present', 'absent'], default='present', required=False),
)
module = AnsibleModule(argument_spec=argument_spec)
vmware_tag = VmwareTag(module)
vmware_tag.ensure_state()
if __name__ == '__main__':
main()
| gpl-3.0 |
shadyueh/pyranking | env/lib/python2.7/site-packages/django/contrib/auth/handlers/modwsgi.py | 537 | 1344 | from django import db
from django.contrib import auth
from django.utils.encoding import force_bytes
def check_password(environ, username, password):
"""
Authenticates against Django's auth database
mod_wsgi docs specify None, True, False as return value depending
on whether the user exists and authenticates.
"""
UserModel = auth.get_user_model()
# db connection state is managed similarly to the wsgi handler
# as mod_wsgi may call these functions outside of a request/response cycle
db.reset_queries()
try:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
return None
if not user.is_active:
return None
return user.check_password(password)
finally:
db.close_old_connections()
def groups_for_user(environ, username):
"""
Authorizes a user based on groups
"""
UserModel = auth.get_user_model()
db.reset_queries()
try:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
return []
if not user.is_active:
return []
return [force_bytes(group.name) for group in user.groups.all()]
finally:
db.close_old_connections()
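

# Hypothetical Apache configuration wiring these handlers up through mod_wsgi
# (directive names follow mod_wsgi's access-control documentation; the script
# path and group name are placeholders):
#
#   WSGIAuthUserScript /path/to/project/wsgi.py
#   WSGIAuthGroupScript /path/to/project/wsgi.py
#   <Location "/secret">
#       AuthType Basic
#       AuthName "Restricted"
#       AuthBasicProvider wsgi
#       Require wsgi-group secret-agents
#   </Location>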
| mit |
matejv/micropython-weatherstation | weatherstation.py | 1 | 4667 | from machine import I2C, Pin, Timer
import socket
import utime as time
import dht
from bmp180 import BMP180 # https://github.com/micropython-IMU/micropython-bmp180
from esp8266_i2c_lcd import I2cLcd # https://github.com/dhylands/python_lcd/
import clock, nethelper
class WeatherStation:
DHTPIN = 14 # DHT data pin
BMPSCL = 5 # BMP I2C clock pin
BMPSDA = 4 # BMP I2C data pin
DISSCL = 12 # LCD I2C clock pin
DISSDA = 13 # LCD I2C data pin
DEFAULT_LCD_ADDR = 0x27
DEFAULT_INTERVAL = 10
MEASURE_TRIES = 3
SERVER_NAME = 'graphite.example.com' # hostname of your graphite server
SERVER_PORT = 2003
def __init__(self):
self.bmp = None
self.dht = None
self.lcd = None
self.socket = None
self.online = False
self.interval = self.DEFAULT_INTERVAL
self.init_lcd()
self.init_net()
self.init_bmp()
self.init_dht()
self.init_clock()
self.init_socket()
self.timer = Timer(-1)
self.timer.init(period=self.interval*1000, mode=Timer.PERIODIC, callback=self.update)
self.update(None)
def update(self, timer):
print('update')
self.check_net()
self.update_clock()
self.measure()
self.update_lcd()
self.send_data()
def stop(self):
self.timer.deinit()
def measure(self):
print('measure')
        tries = self.MEASURE_TRIES
        while tries:
            try:
                self.dht.measure()
                return  # success, stop retrying
            except:
                tries -= 1
def update_lcd(self):
print('update_lcd')
if self.online:
now = time.localtime()
time_str = '%02d:%02d' % (now[3], now[4])
else:
time_str = 'noNet'
#self.lcd.clear() # this will cause flicker
self.lcd.move_to(0, 0) # better to overwrite whole display
self.lcd.putstr('T: %.1f\xdfC H: %.0f%% %s' % (
self.dht.temperature(),
self.dht.humidity(),
time_str
))
def send_data(self):
print('send_data')
if not self.socket:
print('no_socket')
return
        # Graphite plaintext protocol: one "metric value timestamp" line per datapoint
        data = ('weatherstation.temp.dht {tempdht:.1f} {ts}\n'
                'weatherstation.hum.dht {humdht:.0f} {ts}\n'
                'weatherstation.temp.bmp {tempbmp:.1f} {ts}\n'
                'weatherstation.pressure.bmp {pressurebmp:.1f} {ts}\n'
                'weatherstation.time {ts} {ts}\n').format(
tempdht=self.dht.temperature(),
humdht=self.dht.humidity(),
tempbmp=self.bmp.temperature,
pressurebmp=self.bmp.pressure,
ts=self.clock.get_ts()
)
try:
print('writing socket')
self.socket.write(data)
print('socket write complete')
except:
            print('write failed')
self.check_net(recheck=True)
self.init_socket()
def init_bmp(self):
bus = I2C(scl=Pin(self.BMPSCL), sda=Pin(self.BMPSDA), freq=100000)
self.bmp = BMP180(bus)
def init_dht(self):
self.dht = dht.DHT22(Pin(self.DHTPIN))
def init_lcd(self):
i2c = I2C(scl=Pin(self.DISSCL), sda=Pin(self.DISSDA), freq=400000)
self.lcd = I2cLcd(i2c, self.DEFAULT_LCD_ADDR, 2, 16)
def init_net(self):
self.net = nethelper.NetHelper()
self.net.check()
def check_net(self, recheck=False):
info = self.net.check(recheck)
if info and self.online:
return True
        elif info and not self.online:
            self.online = True
            self.got_online()
            self.lcd.clear()
            self.lcd.putstr('% 16s%s' % (info[1], info[0]))
            time.sleep_ms(5000)
            self.lcd.clear()
            return True
        elif not info and self.online:
            self.online = False
            self.lcd.clear()
            self.lcd.putstr('Reconnecting...')
            time.sleep_ms(5000)
            self.lcd.clear()
            return False
elif not info and not self.online:
return False
def got_online(self):
self.init_socket()
self.init_clock()
def init_socket(self):
print('init_socket')
if self.online:
addr_info = socket.getaddrinfo(self.SERVER_NAME, self.SERVER_PORT)
addr = addr_info[0][-1]
self.socket = socket.socket()
self.socket.connect(addr)
else:
self.socket = None
def init_clock(self):
self.clock = clock.Clock()
def update_clock(self):
if self.online:
self.clock.sync()
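

# Minimal usage sketch (assumes the pin wiring in the constants above and a
# working nethelper/clock configuration on the board):
#
#   ws = WeatherStation()   # connects, starts the periodic timer, runs first update
#   ...
#   ws.stop()               # deinit the timer, e.g. before a soft reboot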
| mit |
Tao-Ma/gpdb | src/test/tinc/tincrepo/mpp/models/mpp_tc.py | 12 | 25269 | """
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import inspect
import os
import re
import sys
import time
import tinctest
from tinctest.runner import TINCTextTestResult
from tinctest.lib.system import TINCSystem
from gppylib.commands.base import Command
from mpp.lib.datagen.databases import __databases__, TINCTestDatabase, TINCDatagenException
from mpp.lib.gplog import GpLog
from mpp.lib.gpstop import GpStop
from mpp.lib.PSQL import PSQL
import unittest2 as unittest
class MPPTestCaseException(Exception):
"""
The exception that will be thrown for any errors or failures in MPPTestCase
"""
pass
class MPPDUT(object):
"""
This class is used to find the Device Under Test.
It provides instance variables for product name and version_string.
It will only be used by MPPMetaClassType to dynamically change a class's MRO.
It also provides a product_environment dictionary to store gpopt version if found.
"""
def __init__(self, product = None, version_string = None):
# Valid products as of 11/25/13: gpdb, hawq
self.product = product
# version_string has this format: major#.minor#.service_pack#.version_number<hotfix_alphanumeral>
# It can be incomplete: 4.3 or 4.2.1
self.version_string = version_string
self.product_environment = {}
# First, get the product version
if (self.product is None) or (self.version_string is None):
self._get_product_version()
# Next, get gpopt (GP Optimizer Mode) version
gpopt_version = self._get_gpopt_version()
if gpopt_version:
self.product_environment['gpopt'] = gpopt_version
def _get_version_string_output(self):
# Version string is the output of postgres --gp-version or postgress --version
# Output in gpdb: "postgres (Greenplum Database) 4.3_PARISTX_ORCA build 43249"
# Output in hawq: "postgres (HAWQ) 4.2.0 build 1"
# Output in postgres: "postgres (PostgreSQL) 9.2.4"
# The following command will fail if the DUT is postgres
version_command = Command(name = 'get gp-version', cmdStr = 'postgres --gp-version')
try:
version_command.run(validateAfter = True)
except Exception, e:
tinctest.logger.debug("Failed while running get gp-version: %s" %e)
version_command = Command(name = 'get version', cmdStr = 'postgres --version')
version_command.run(validateAfter = True)
return version_command.get_results().stdout
def _get_product_version(self):
version_string_information = ''
try:
version_string_information = self._get_version_string_output()
except Exception, e:
tinctest.logger.exception("Failure while getting version information: %s" %e)
tinctest.logger.critical("Could not detect one of the supported products (gpdb, hawq or postgres) in your environment. Make sure your environment is set correctly.")
raise MPPTestCaseException("Could not detect one of the supported products (gpdb, hawq or postgres) in your environment. Make sure your environment is set correctly.")
match_object = re.search("\((.+)\)", version_string_information)
database_match = match_object.group(0)
if "HAWQ" in database_match:
self.product = 'hawq'
# Replace version_string_information to point to hawq-version
version_command = Command(name = 'get hawq-version', cmdStr = 'postgres --hawq-version')
version_command.run(validateAfter = True)
version_string_information = version_command.get_results().stdout
tinctest.logger.info("DUT is detected to be hawq. Version string: %s" %version_string_information)
elif "Greenplum Database" in database_match:
tinctest.logger.info("DUT is detected to be gpdb. Version string: %s" %version_string_information)
self.product = 'gpdb'
elif "PostgreSQL" in database_match:
tinctest.logger.info("DUT is detected to be postgres. Version string: %s" %version_string_information)
self.product = 'postgres'
else:
tinctest.logger.critical("Unexpected version string obtained: %s." %version_string_information)
tinctest.logger.critical("Could not detect one of the supported products (gpdb, hawq or postgres) in your environment. Make sure your environment is set correctly.")
raise MPPTestCaseException("Unexpected version string obtained: %s" %version_string_information)
# At this point, version_string_information can be extracted to get the exact version
# version_string_information for gpdb (--gp_version): "postgres (Greenplum Database) 4.3_PARISTX_ORCA build 43249"
# version_string_information for hawq (--hawq_version): "postgres (HAWQ) 1.1.4.0 build dev"
# version_string_information for postgres (--version): "postgres (PostgreSQL) 9.2.4"
version_string_information_match_list = re.findall("\)\s(.*)", version_string_information)
if version_string_information_match_list:
# Remove everything after space and underscore
version_part = re.sub(r'\s.*$', r'', version_string_information_match_list[0])
version_part = re.sub(r'_.*$', r'', version_part)
# At this point, we have a version
self.version_string = version_part
else:
tinctest.logger.critical("Unexpected version string obtained: %s." %version_string_information)
tinctest.logger.critical("Could not detect one of the supported products (gpdb, hawq or postgres) in your environment. Make sure your environment is set correctly.")
raise MPPTestCaseException("Unexpected version string obtained: %s" %version_string_information)
def _get_gpopt_version(self):
# Return gpopt_version. Return empty, if not found.
gp_opt_version = ""
try:
# The following command will fail if the DUT doesn't have optimizer
gp_opt_version_cmd_results = {}
psql_stdout = PSQL.run_sql_command("select gp_opt_version()", flags = "-t -q", results=gp_opt_version_cmd_results).strip()
if gp_opt_version_cmd_results['rc'] or gp_opt_version_cmd_results['stderr'] != "":
# received an error
return gp_opt_version
# Output is in the format of: GPOPT version: 1.241, GPOS version: 1.90, Xerces version: 3.1.1-p1
# We want 1.241 from the above
gp_opt_version = psql_stdout.split()[2].strip(",")
except Exception, e:
tinctest.logger.debug("Failed while running select gp_opt_version: %s" %e)
return gp_opt_version
def __str__(self):
return "DUT: product: %s ; version: %s" % (self.product, self.version_string)
class _MPPMetaClassType(type):
"""
MPPMetaClassType class overrides new and init methods of metaclass type.
It is used to dynamically change a class's MRO for a DUT.
It does this by iterating through the base classes and checking
if there are any product-specific hidden models of those base classes.
MPPTestCase and all of its derived classes are of type MPPMetaClassType.
Product-specific hidden models have to follow these rules:
- They have to reside in the same module as the base class.
- They have to be prefixed and suffixed with two underscores (__)
- They have to have the lower-case product name in the class name, following the prefix of __
- The product name has to be same as the one provided by DUT class.
An example of product-specific hidden model: __gpdbSQLTestCase__ in the same module as SQLTestCase for gpdb DUT.
"""
# Class variable to keep track of DUT
DUT = MPPDUT()
tinctest.logger.info(DUT)
def __new__(metaclass, clsname, bases, dct):
# Add DUT to class's built-in dictionary
dct['__product__'] = _MPPMetaClassType.DUT.product
dct['__version_string__'] = _MPPMetaClassType.DUT.version_string
dct['__product_environment__'] = _MPPMetaClassType.DUT.product_environment
dct['change_mro'] = False
dct['make_me_product_agnostic'] = classmethod(metaclass.make_me_product_agnostic)
new_bases = ()
if (clsname.startswith('__') and clsname.endswith('__')) or (clsname is 'MPPTestCase'):
# If here, our clsname is one of the product-specific hidden models or MPPTestCase
# No need to check bases
new_bases += bases
else:
# If here, we need to check each of our clsname's bases
# and see if each has product-specific class
for base in bases:
new_base_name = '__' + _MPPMetaClassType.DUT.product + base.__name__ + '__'
# Variable to track whether we found a match for the base
try:
""" Product-specific hidden models should always reside in the same module as the base class """
exec ('from ' + base.__module__ + ' import ' + new_base_name)
new_bases += (eval(new_base_name),)
except:
new_bases += (base,)
return super(_MPPMetaClassType, metaclass).__new__(metaclass, clsname, new_bases, dct)
def __init__(cls, clsname, bases, dct):
super(_MPPMetaClassType, cls).__init__(clsname, bases, dct)
@staticmethod
def make_me_product_agnostic(cls):
# Change the class variable change_mro to let mro() method know that this class needs to prepend product specific model
cls.change_mro = True
# The line below (fakingly changing the cls' bases) retriggers mro() method
cls.__bases__ = cls.__bases__ + tuple()
def mro(cls):
default_mro = super(_MPPMetaClassType, cls).mro()
if hasattr(cls, "change_mro") and cls.change_mro:
new_class_name = '__' + _MPPMetaClassType.DUT.product + cls.__name__ + '__'
try:
exec ('from ' + cls.__module__ + ' import ' + new_class_name)
new_class_object = eval(new_class_name)
default_mro.insert(0, new_class_object)
return default_mro
except:
# No hidden class defined. Nothing to do
pass
return default_mro
@tinctest.skipLoading("Test model. No tests loaded.")
class MPPTestCase(tinctest.TINCTestCase):
"""
MPPTestCase model is a top-level executor for all MPP test cases. All MPP test cases (HAWQ, GPDB, etc.)
should either directly or indirectly inherit from MPPTestCase. It inherits from TINCTestCase,
and is a parent of SQLTestCase.
When a test of this type fails, we do the following:
-> if restart_on_fatal_failure is set to True, inspect logs for errors and restart the cluster.
-> if gather_logs_on_failure is set to True, gather master and segment logs for the duration of the test case when this test case fails.
@metadata: host: Host where the MPP database resides. Defaults to localhost.
@metadata: db_name: Database where the test case will be executed. Defaults to system environment variable DBNAME.
@metadata: username: Username to use to login to the database. Defaults to system environment variable USER.
@metadata: password: Password to use to login to the database. If not given, it assumes that user has trust authentication.
    @metadata: gather_logs_on_failure: Gather master and segment logs in case of a fatal failure.
@metadata: restart_on_fatal_failure: Boolean to determine if the cluster should be restarted on failure. If the metadata doesn't exist, it won't be restarted.
@undocumented: defaultTestResult
@undocumented: __metaclass__
"""
# MPPTestCase class is of type MPPMetaClassType
# MPPMetaClassType will take of reconfiguring the bases of all the derived classes that have product-specific hidden models
__metaclass__ = _MPPMetaClassType
#: Directory relative to the test module where all the output artifacts will be collected. Defaults to 'output/'
out_dir = 'output/'
#: Database name to be used for any connection to the test cluster. Defaults to None. This database will also be configured in setUpClass on MPPTestCase
db_name = None
def __init__(self, methodName, baseline_result = None):
#: boolean that determines whether or not to restart the cluster on a fatal failure. Defaults to False.
self.restart_on_fatal_failure = False
#: boolean that determines whether or not to gather logs on failure. Defaults to False
self.gather_logs_on_failure = False
super(MPPTestCase, self).__init__(methodName, baseline_result)
@classmethod
def setUpClass(cls):
"""
setUpClass of MPPTestCase does the following:
-> Create out directory for the class if it does not exist.
This is thread safe in case an MPPTestCase is used concurrently
within a ScenarioTestCase or ConcurrencyTestCase
-> Configures the database specified at the class level variable 'db_name'
"""
tinctest.logger.trace_in()
#QAINF-760 - we need to treat db_name in the class level doc string as a class level variable
#rather than an instance level variable
ds = cls.__doc__
if ds:
lines = ds.splitlines()
for line in lines:
line = line.strip()
if line.find('@db_name') != 0:
continue
line = line[1:]
if len(line.split()) <= 1:
break
(key, cls.db_name) = line.split(' ', 1)
break
super(MPPTestCase, cls).setUpClass()
if not os.path.exists(cls.get_out_dir()):
TINCSystem.make_dirs(cls.get_out_dir(), ignore_exists_error = True)
if cls.db_name:
tinctest.logger.debug("Configure database %s from MPPTestCase setUpClass." % cls.db_name)
cls.configure_database(cls.db_name)
tinctest.logger.trace_out()
@classmethod
def get_out_dir(cls):
"""
Returns the absolute output directory for this test class.
Joins cls.out_dir with the location where the test module exists.
"""
source_file = sys.modules[cls.__module__].__file__
source_dir = os.path.dirname(source_file)
abs_out_dir = os.path.join(source_dir, cls.out_dir)
return abs_out_dir
@classmethod
def get_source_dir(cls):
"""
Returns the directory at which this test class exists.
"""
source_file = sys.modules[cls.__module__].__file__
source_dir = os.path.dirname(source_file)
return source_dir
@classmethod
def configure_database(cls,db_name):
"""
Configures the given database using datagen libraries.
@param db_name: Name of the database to be configured. If there is no specific datagen available for this database,
this will just create an empty database with the given name.
@type db_name: string
"""
tinctest.logger.trace_in(db_name)
if not __databases__.has_key(db_name):
tinctest.logger.info("db_name %s is not defined in __databases__ dictionary." % db_name)
__databases__[db_name] = TINCTestDatabase(database_name=db_name)
py_mod = sys.modules[cls.__module__]
TINCTestCustomDatabase = None
for obj in inspect.getmembers(py_mod, lambda member: inspect.isclass(member)
and issubclass(member, TINCTestDatabase)):
if obj[1]._infer_metadata().get('db_name', None) == db_name:
TINCTestCustomDatabase = obj[1]
break
if TINCTestCustomDatabase:
__databases__[db_name] = TINCTestCustomDatabase(database_name=db_name)
else:
tinctest.logger.warning("No CustomDatabase class provided for %s." %db_name)
if __databases__[db_name]:
tinctest.logger.info("Running setup of database %s." % db_name)
try:
__databases__[db_name].setUp()
except Exception, exp:
# if we are here, setup failed. handle errors
# accordingly.
__databases__[db_name].tearDown()
raise TINCDatagenException(exp)
tinctest.logger.trace_out()
def setUp(self):
"""
setUp method in MPPTestCase does the following:
-> Configures the database specified through the metadat 'db_name'.
This will configure the database only if it was not already done in setUpClass.
"""
tinctest.logger.trace_in()
super(MPPTestCase, self).setUp()
# Create the database if db_name metadata is specified and if it doesn't exists
        # TODO: Change TINCTestDatabase to take in PSQL options (part of story QAINF-191)
if self.db_name and self.__class__.db_name and self.db_name == self.__class__.db_name:
tinctest.logger.debug("No need to configure database %s in setUp, since it would have already been configured via setUpClass." % self.db_name)
elif self.db_name:
tinctest.logger.debug("Configure database %s from MPPTestCase setUp." % self.db_name)
self.configure_database(self.db_name)
tinctest.logger.trace_out()
def defaultTestResult(self, stream=None, descriptions=None, verbosity=None):
"""
TODO: This method should not be exposed as a public method. All result objects
will be internal.
Return a custom result object for MPPTestCase. We need a handle on
whether the test errored out / failed to honor metadata like 'restart'
"""
if stream and descriptions and verbosity:
return _MPPTestCaseResult(stream, descriptions, verbosity)
else:
return unittest.TestResult()
def get_product_version(self):
"""
This function is used by TINCTestCase to determine the current DUT version.
It uses this information, along with @product_version, to determine if a test case
should run in this particular DUT.
@return: A two-tuple containing name and version of the product where test is executed
@rtype: (string, string)
"""
return (self.__class__.__product__, self.__class__.__version_string__)
def _infer_metadata(self):
"""
Read all the metadata and store them as instance variables.
"""
super(MPPTestCase, self)._infer_metadata()
self.host = self._metadata.get('host', 'localhost')
self.db_name = self._metadata.get('db_name', self.__class__.db_name)
self.username = self._metadata.get('username', None)
self.password = self._metadata.get('password', None)
if self._metadata.get('gather_logs_on_failure') and self._metadata.get('gather_logs_on_failure').lower() == 'true':
self.gather_logs_on_failure = True
if self._metadata.get('restart_on_fatal_failure') and self._metadata.get('restart_on_fatal_failure').lower() == 'true':
self.restart_on_fatal_failure = True
self.gpopt = self._metadata.get('gpopt', None)
if self.gpopt:
if 'gpopt' not in self.__class__.__product_environment__:
self.skip = 'Test does not apply to the deployed system. Test Case GPOPT version - %s , Deployed system has no GPOPT' % self.gpopt
elif tuple(self.gpopt.split('.')) > tuple(self.__class__.__product_environment__['gpopt'].split('.')):
self.skip = 'Test does not apply to the deployed GPOPT version. Test Case GPOPT version - %s , Deployed version - %s' % (self.gpopt, self.__class__.__product_environment__['gpopt'])
def install_cluster(self):
"""
This function will install the cluster
"""
pass
def initialize_cluster(self):
"""
This function will initialize the cluster
"""
pass
def configure_cluster(self):
"""
This function will configure the cluster
"""
pass
def inspect_cluster(self):
"""
This function will inspect the cluster from the start time of this test till now.
Returns true if there are no errors in logs, False if there are errors in logs.
@return: Returns True / False depending on whether errors were found in the log
@rtype: boolean
"""
tinctest.logger.trace_in()
start_time = self.start_time
if start_time == 0 or not start_time:
return True
end_time = self.end_time
if end_time == 0 or not end_time:
end_time = time.time()
return_status = not GpLog.check_log_for_errors(start_time, end_time)
tinctest.logger.trace_out(str(return_status))
return return_status
def gather_log(self):
"""
This method will gather logs from all segments between start_time and end_time
of the test and write it to an out file in the output directory. The default name
of the log file will be <testmethodname>.logs
"""
tinctest.logger.trace_in()
start_time = self.start_time
if start_time == 0 or not start_time:
return
end_time = self.end_time
if end_time == 0 or not end_time:
end_time = time.time()
out_file = os.path.join(self.get_out_dir(), self._testMethodName + '.logs')
GpLog.gather_log(start_time, end_time, out_file)
tinctest.logger.trace_out()
def delete_cluster(self):
"""
This function will delete the cluster
"""
pass
def start_cluster(self):
"""
This function will start the cluster
"""
pass
def stop_cluster(self):
"""
This function will stop the cluster
"""
pass
def restart_cluster(self):
"""
This function will restart the cluster
"""
pass
class _MPPTestCaseResult(TINCTextTestResult):
"""
A custom listener class for MPPTestCase. This is responsible for
reacting appropriately to failures and errors of type MPPTestCase.
Following is what this class does on failure:
-> If restart_on_fatal_failure is set for the test , inspects the logs for
fatal failure and restarts the cluster if there are any errors found.
-> If gather_logs_on_failure is set for the test, gathers segment and master
logs to the output directory.
"""
def addFailure(self, test, err):
try:
            # restart the cluster if restart_on_fatal_failure is set to True and inspect_cluster returns False
if test.gather_logs_on_failure:
test.gather_log()
if test.restart_on_fatal_failure:
if not test.inspect_cluster():
tinctest.logger.warning("Errors found in the logs for this test case. Restarting the cluster")
test.restart_cluster()
except Exception, e:
tinctest.logger.exception("Re-starting cluster failed - %s" %e)
super(_MPPTestCaseResult, self).addFailure(test, err)
class __gpdbMPPTestCase__(MPPTestCase):
"""
__gpdbMPPTestCase__ is a hidden class that overrides GPDB specific methods of MPPTestCase.
This class should never be used as a parent or as an executor for any test cases.
Presently, this class doesn't override any methods. It is here only for reference.
"""
pass
class __hawqMPPTestCase__(MPPTestCase):
"""
__hawqMPPTestCase__ is a hidden class that overrides HAWQ specific methods of MPPTestCase.
This class should never be used as a parent or as an executor for any test cases.
Presently, this class doesn't override any methods. It is here only for reference.
"""
pass
class __postgresMPPTestCase__(MPPTestCase):
"""
__postgresMPPTestCase__ is a hidden class that overrides postgres specific methods of MPPTestCase.
This class should never be used as a parent or as an executor for any test cases.
Presently, this class doesn't override any methods. It is here only for reference.
"""
pass
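

# Illustrative sketch (hypothetical test case, not part of this module): a
# concrete subclass relying on the docstring metadata documented above, which
# _infer_metadata() reads at load time.
#
# class SampleMPPTest(MPPTestCase):
#     """
#     @db_name sample_db
#     @gather_logs_on_failure True
#     @restart_on_fatal_failure True
#     """
#     def test_smoke(self):
#         self.assertTrue(self.inspect_cluster())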
| apache-2.0 |
schmidsi/django-pyodbc | tests/order_with_respect_to/models.py | 24 | 2132 | """
Tests for the order_with_respect_to Meta attribute.
"""
from django.db import models
class Question(models.Model):
text = models.CharField(max_length=200)
class Answer(models.Model):
text = models.CharField(max_length=200)
question = models.ForeignKey(Question)
class Meta:
order_with_respect_to = 'question'
def __unicode__(self):
return unicode(self.text)
__test__ = {'API_TESTS': """
>>> q1 = Question(text="Which Beatle starts with the letter 'R'?")
>>> q1.save()
>>> q2 = Question(text="What is your name?")
>>> q2.save()
>>> Answer(text="John", question=q1).save()
>>> Answer(text="Jonno",question=q2).save()
>>> Answer(text="Paul", question=q1).save()
>>> Answer(text="Paulo", question=q2).save()
>>> Answer(text="George", question=q1).save()
>>> Answer(text="Ringo", question=q1).save()
The answers will always be ordered in the order they were inserted.
>>> q1.answer_set.all()
[<Answer: John>, <Answer: Paul>, <Answer: George>, <Answer: Ringo>]
We can retrieve the answers related to a particular object, in the order
they were created, once we have a particular object.
>>> a1 = Answer.objects.filter(question=q1)[0]
>>> a1
<Answer: John>
>>> a2 = a1.get_next_in_order()
>>> a2
<Answer: Paul>
>>> a4 = list(Answer.objects.filter(question=q1))[-1]
>>> a4
<Answer: Ringo>
>>> a4.get_previous_in_order()
<Answer: George>
Determining (and setting) the ordering for a particular item is also possible.
>>> id_list = [o.pk for o in q1.answer_set.all()]
>>> a2.question.get_answer_order() == id_list
True
>>> a5 = Answer(text="Number five", question=q1)
>>> a5.save()
It doesn't matter which answer we use to check the order, it will always be the same.
>>> a2.question.get_answer_order() == a5.question.get_answer_order()
True
The ordering can be altered:
>>> id_list = [o.pk for o in q1.answer_set.all()]
>>> x = id_list.pop()
>>> id_list.insert(-1, x)
>>> a5.question.get_answer_order() == id_list
False
>>> a5.question.set_answer_order(id_list)
>>> q1.answer_set.all()
[<Answer: John>, <Answer: Paul>, <Answer: George>, <Answer: Number five>, <Answer: Ringo>]
"""
}
| bsd-3-clause |
mjirik/teigen | tests/teigen_test.py | 1 | 6709 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
import unittest
import sys
import pytest
# import teigen
# import io3d
import os.path as op
path_to_script = op.dirname(op.abspath(__file__))
class MyTestCase(unittest.TestCase):
#
@pytest.mark.interactive
def test_teigen_gui_interactive(self):
import os.path as op
params = None
# params = io3d.misc.obj_from_file(op.expanduser("~/teigen_data/038/slice_parameters.yaml"))
from PyQt5.QtWidgets import QApplication, QFileDialog
# from teigen.dictwidgetqt import DictWidget
import teigen.gui
app = QApplication(sys.argv)
cw = teigen.gui.TeigenWidget(config=params)
cw.show()
app.exec_()
@pytest.mark.interactive
def test_teigen_gui_interactive_with_parameters(self):
"""
        reproduces undetected collision bug
:return:
"""
import os.path as op
params = None
# params = io3d.misc.obj_from_file(op.expanduser("~/teigen_data/038/slice_parameters.yaml"))
params = {
"generator_id": 3,
"areasampling": {
"voxelsize_mm": [1., 1., 1.],
"areasize_px": [20, 20, 20],
"areasize_mm": [20, 20, 20],
},
"postprocessing": {
"measurement_resolution": 15,
"measurement_multiplier": -1,
"add_noise": False
},
"generators": {
"Unconnected tubes": {
"element_number": 3,
"random_generator_seed": 110,
"radius_distribution_mean": 15,
"radius_distribution_maximum": 20,
"orientation_anisotropic": False,
}
}
}
# tg.update_config(**conf)
from PyQt5.QtWidgets import QApplication
# from teigen.dictwidgetqt import DictWidget
import teigen.gui
app = QApplication(sys.argv)
cw = teigen.gui.TeigenWidget(use_default_config=True, config=params)
cw.show()
app.exec_()
# def test_teigen_gui(self):
# import PyQt4
# from PyQt4.QtGui import QApplication, QFileDialog
# # from teigen.dictwidgetqt import DictWidget
# import teigen
# import teigen.geometry3d
# import teigen.gui
# app = QApplication(sys.argv)
# cw = teigen.gui.TeigenWidget()
# cw.show()
# cw.deleteLater()
# app.deleteLater()
@pytest.mark.interactive
def test_teigen_without_save(self):
import teigen.gui
tg = teigen.gui.Teigen()
conf = {
"generator_id": 3,
"areasampling": {
"voxelsize_mm": [1., 1., 1.],
"areasize_px": [110, 120, 130],
"areasize_mm": [110, 120, 130],
},
"postprocessing": {
"measurement_multiplier": -1,
"add_noise": False
},
"generators": {
"Unconnected cylinders": {
"element_number": 10
}
}
}
tg.update_config(**conf)
tg.step1()
@pytest.mark.interactive
def test_teigen_big(self):
import teigen.gui
tg = teigen.gui.Teigen()
conf = {
"areasampling": {
"voxelsize_mm": [1., 1., 1.],
"areasize_px": [210, 720, 730],
"areasize_mm": [210, 720, 730],
},
"postprocessing": {
"measurement_multiplier": -1,
"add_noise": False
},
"generators": {
"Unconnected cylinders": {
"element_number": 10
}
}
}
tg.update_config(**conf)
tg.step1()
tg.step2()
# def test_teigen_small(self):
# import teigen.gui
# tg = teigen.gui.Teigen()
# conf = {
# "areasampling":{
# "voxelsize_mm": [1., 1., 1.],
# "areasize_px": [110, 120, 130],
# "areasize_mm": [110, 120, 130],
# },
# "postprocessing":{
# "measurement_multiplier":-1,
# }
# }
# tg.update_config(**conf)
# tg.run()
# tg.save_volume()
def test_teigen_prepare_parameters_and_measurement(self):
"""
Check string like generator_id
:return:
"""
logger.debug("test prepare parameters and measurement")
import teigen.gui
tg = teigen.gui.Teigen()
tg.use_default_config()
conf = {
"generator_id": "Unconnected tubes",
"areasampling": {
"voxelsize_mm": [1., 1., 1.],
"areasize_px": [110, 120, 130],
"areasize_mm": [110, 120, 130],
},
"postprocessing": {
# "measurement_multiplier": -1,
"add_noise": False
},
"generators": {
"Unconnected tubes": {
"element_number": 1
}
}
}
tg.update_config(**conf)
tg.step1()
params = tg.get_config_and_measurement()
tg.step2()
logger.debug(params)
def test_teigen_read_tube_skeleton_from_file(self):
"""
Read tube skeleton from file
:return:
"""
logger.debug("test read tube skeleton from file")
import teigen.gui
tg = teigen.gui.Teigen()
tg.use_default_config()
conf = {
"generator_id": "Unconnected tubes",
"areasampling": {
"voxelsize_mm": [1., 1., 1.],
"areasize_px": [110, 120, 130],
"areasize_mm": [110, 120, 130],
},
"postprocessing": {
# "measurement_multiplier": -1,
"add_noise": False
},
"generators": {
"Unconnected tubes": {
"element_number": 1
}
}
}
tg.update_config(**conf)
tg.set_loglevel("DEBUG")
tg.step1_by_load_tube_skeleton(
op.join(path_to_script, "data_vt.yaml" ))
#op.join(path_to_script, "vt_biodur.yaml" ))
params = tg.get_config_and_measurement()
tg.step2()
logger.debug(params)
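

# The @pytest.mark.interactive tests above open Qt windows; a hypothetical
# invocation that skips them on a headless machine:
#
#   pytest -m "not interactive" tests/teigen_test.py
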
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
40423130/2016fallcadp_hw | plugin/liquid_tags/flickr.py | 278 | 3478 | """
Flickr Tag
----------
This implements a Liquid-style flickr tag for Pelican.
IMPORTANT: You have to create an API key to access the flickr api.
You can do this `here <https://www.flickr.com/services/apps/create/apply>`_.
Add the created key to your config under FLICKR_API_KEY.
Syntax
------
{% flickr image_id [small|medium|large] ["alt text"|'alt text'] %}
Example
--------
{% flickr 18841055371 large "Fichte" %}
Output
------
<a href="https://www.flickr.com/photos/marvinxsteadfast/18841055371/"><img src="https://farm6.staticflickr.com/5552/18841055371_17ac287217_b.jpg" alt="Fichte"></a>
"""
import json
import re
try:
from urllib.request import urlopen
from urllib.parse import urlencode
except ImportError:
from urllib import urlopen, urlencode
from .mdx_liquid_tags import LiquidTags
SYNTAX = '''{% flickr image_id [small|medium|large] ["alt text"|'alt text'] %}'''
PARSE_SYNTAX = re.compile(('''(?P<photo_id>\S+)'''
'''(?:\s+(?P<size>large|medium|small))?'''
'''(?:\s+(['"]{0,1})(?P<alt>.+)(\\3))?'''))
def get_info(photo_id, api_key):
    ''' Get photo information from the flickr api. '''
query = urlencode({
'method': 'flickr.photos.getInfo',
'api_key': api_key,
'photo_id': photo_id,
'format': 'json',
'nojsoncallback': '1'
})
r = urlopen('https://api.flickr.com/services/rest/?' + query)
info = json.loads(r.read().decode('utf-8'))
if info['stat'] == 'fail':
raise ValueError(info['message'])
return info
def source_url(farm, server, id, secret, size):
''' Url for direct jpg use. '''
if size == 'small':
img_size = 'n'
elif size == 'medium':
img_size = 'c'
elif size == 'large':
img_size = 'b'
return 'https://farm{}.staticflickr.com/{}/{}_{}_{}.jpg'.format(
farm, server, id, secret, img_size)
def generate_html(attrs, api_key):
    ''' Return the anchor+img HTML for the photo. '''
# getting flickr api data
flickr_data = get_info(attrs['photo_id'], api_key)
# if size is not defined it will use large as image size
if 'size' not in attrs.keys():
attrs['size'] = 'large'
# if no alt is defined it will use the flickr image title
if 'alt' not in attrs.keys():
attrs['alt'] = flickr_data['photo']['title']['_content']
# return final html code
return '<a href="{}"><img src="{}" alt="{}"></a>'.format(
flickr_data['photo']['urls']['url'][0]['_content'],
source_url(flickr_data['photo']['farm'],
flickr_data['photo']['server'],
attrs['photo_id'],
flickr_data['photo']['secret'],
attrs['size']),
attrs['alt'])
@LiquidTags.register('flickr')
def flickr(preprocessor, tag, markup):
# getting flickr api key out of config
api_key = preprocessor.configs.getConfig('FLICKR_API_KEY')
# parse markup and extract data
attrs = None
match = PARSE_SYNTAX.search(markup)
if match:
attrs = dict(
[(key, value.strip())
for (key, value) in match.groupdict().items() if value])
else:
raise ValueError('Error processing input. '
'Expected syntax: {}'.format(SYNTAX))
return generate_html(attrs, api_key)
# ---------------------------------------------------
# This import allows image tag to be a Pelican plugin
from liquid_tags import register
| agpl-3.0 |
euri10/pynavitia | setup.py | 1 | 1662 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'Click>=6.0',
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='pynavitia',
version='0.1.0',
description="Python Boilerplate contains all the boilerplate you need to create a Python package.",
long_description=readme + '\n\n' + history,
author="Benoit Barthelet",
author_email='[email protected]',
url='https://github.com/euri10/pynavitia',
packages=[
'pynavitia',
],
package_dir={'pynavitia':
'pynavitia'},
entry_points={
'console_scripts': [
'pynavitia=pynavitia.cli:main'
]
},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='pynavitia',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements
)
| mit |
lovexiaov/SandwichApp | venv/lib/python2.7/site-packages/wheel/egg2wheel.py | 471 | 2633 | #!/usr/bin/env python
import os.path
import re
import sys
import tempfile
import zipfile
import wheel.bdist_wheel
import shutil
import distutils.dist
from distutils.archive_util import make_archive
from argparse import ArgumentParser
from glob import iglob
egg_info_re = re.compile(r'''(?P<name>.+?)-(?P<ver>.+?)
    (-(?P<pyver>.+?))?(-(?P<arch>.+?))?\.egg''', re.VERBOSE)
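# e.g. 'pkg-1.0-py2.7-linux_x86_64.egg' yields (illustrative):
#   name='pkg', ver='1.0', pyver='py2.7', arch='linux_x86_64'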
def egg2wheel(egg_path, dest_dir):
egg_info = egg_info_re.match(os.path.basename(egg_path)).groupdict()
dir = tempfile.mkdtemp(suffix="_e2w")
if os.path.isfile(egg_path):
# assume we have a bdist_egg otherwise
egg = zipfile.ZipFile(egg_path)
egg.extractall(dir)
else:
# support buildout-style installed eggs directories
for pth in os.listdir(egg_path):
src = os.path.join(egg_path, pth)
if os.path.isfile(src):
shutil.copy2(src, dir)
else:
shutil.copytree(src, os.path.join(dir, pth))
dist_info = "%s-%s" % (egg_info['name'], egg_info['ver'])
abi = 'none'
    pyver = egg_info['pyver']
    if pyver is None:
        # Assumption: an egg whose filename carries no Python tag was built
        # for the running interpreter.
        pyver = 'py%d.%d' % sys.version_info[:2]
    pyver = pyver.replace('.', '')
arch = (egg_info['arch'] or 'any').replace('.', '_').replace('-', '_')
if arch != 'any':
# assume all binary eggs are for CPython
pyver = 'cp' + pyver[2:]
wheel_name = '-'.join((
dist_info,
pyver,
abi,
arch
))
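    # e.g. 'pkg-1.0-py27-none-any' for a pure egg, or
    # 'pkg-1.0-cp27-none-linux_x86_64' for a binary one (illustrative).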
bw = wheel.bdist_wheel.bdist_wheel(distutils.dist.Distribution())
bw.root_is_purelib = egg_info['arch'] is None
dist_info_dir = os.path.join(dir, '%s.dist-info' % dist_info)
bw.egg2dist(os.path.join(dir, 'EGG-INFO'),
dist_info_dir)
bw.write_wheelfile(dist_info_dir, generator='egg2wheel')
bw.write_record(dir, dist_info_dir)
filename = make_archive(os.path.join(dest_dir, wheel_name), 'zip', root_dir=dir)
os.rename(filename, filename[:-3] + 'whl')
shutil.rmtree(dir)
def main():
parser = ArgumentParser()
parser.add_argument('eggs', nargs='*', help="Eggs to convert")
parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
help="Directory to store wheels (default %(default)s)")
parser.add_argument('--verbose', '-v', action='store_true')
args = parser.parse_args()
for pat in args.eggs:
for egg in iglob(pat):
if args.verbose:
sys.stdout.write("{0}... ".format(egg))
egg2wheel(egg, args.dest_dir)
if args.verbose:
sys.stdout.write("OK\n")
if __name__ == "__main__":
main()
| apache-2.0 |
jimmbraddock/ns-3.20-ATN | src/fd-net-device/bindings/modulegen_customizations.py | 128 | 1118 | import os
def post_register_types(root_module):
enabled_features = os.environ['NS3_ENABLED_FEATURES'].split(',')
if 'EmuFdNetDevice' not in enabled_features:
        if 'ns3::EmuFdNetDeviceHelper' in root_module:
root_module.classes.remove(root_module['ns3::EmuFdNetDeviceHelper'])
if 'TapFdNetDevice' not in enabled_features:
        if 'ns3::TapFdNetDeviceHelper' in root_module:
root_module.classes.remove(root_module['ns3::TapFdNetDeviceHelper'])
if 'PlanetLabFdNetDevice' not in enabled_features:
        if 'ns3::PlanetLabFdNetDeviceHelper' in root_module:
root_module.classes.remove(root_module['ns3::PlanetLabFdNetDeviceHelper'])
if 'FdNetDevice' not in enabled_features:
for clsname in ['FdNetDevice', 'FdNetDeviceHelper', 'FdNetDeviceFdReader']:
if 'ns3::%s' % clsname in root_module:
root_module.classes.remove(root_module['ns3::%s' % clsname])
if 'ns3::FdNetDeviceHelper::EncapsulationMode' in root_module:
root_module.enums.remove(root_module['ns3::FdNetDeviceHelper::EncapsulationMode'])
| gpl-2.0 |
JimCircadian/ansible | lib/ansible/module_utils/facts/hardware/darwin.py | 96 | 3527 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.sysctl import get_sysctl
class DarwinHardware(Hardware):
"""
Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- processor
- processor_cores
- memtotal_mb
- memfree_mb
- model
- osversion
- osrevision
"""
platform = 'Darwin'
def populate(self, collected_facts=None):
hardware_facts = {}
self.sysctl = get_sysctl(self.module, ['hw', 'machdep', 'kern'])
mac_facts = self.get_mac_facts()
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
hardware_facts.update(mac_facts)
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
return hardware_facts
def get_system_profile(self):
rc, out, err = self.module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
if rc != 0:
return dict()
system_profile = dict()
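        # Each "Key: Value" output line becomes one dict entry with runs of
        # whitespace collapsed, e.g. "Processor Name: Intel Core i7" ->
        # {'Processor Name': 'Intel Core i7'} (illustrative).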
for line in out.splitlines():
if ': ' in line:
(key, value) = line.split(': ', 1)
system_profile[key.strip()] = ' '.join(value.strip().split())
return system_profile
def get_mac_facts(self):
mac_facts = {}
rc, out, err = self.module.run_command("sysctl hw.model")
if rc == 0:
mac_facts['model'] = out.splitlines()[-1].split()[1]
mac_facts['osversion'] = self.sysctl['kern.osversion']
mac_facts['osrevision'] = self.sysctl['kern.osrevision']
return mac_facts
def get_cpu_facts(self):
cpu_facts = {}
if 'machdep.cpu.brand_string' in self.sysctl: # Intel
cpu_facts['processor'] = self.sysctl['machdep.cpu.brand_string']
cpu_facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
else: # PowerPC
system_profile = self.get_system_profile()
cpu_facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
cpu_facts['processor_cores'] = self.sysctl['hw.physicalcpu']
cpu_facts['processor_vcpus'] = self.sysctl.get('hw.logicalcpu') or self.sysctl.get('hw.ncpu') or ''
return cpu_facts
def get_memory_facts(self):
memory_facts = {}
memory_facts['memtotal_mb'] = int(self.sysctl['hw.memsize']) // 1024 // 1024
rc, out, err = self.module.run_command("sysctl hw.usermem")
if rc == 0:
memory_facts['memfree_mb'] = int(out.splitlines()[-1].split()[1]) // 1024 // 1024
return memory_facts
class DarwinHardwareCollector(HardwareCollector):
_fact_class = DarwinHardware
_platform = 'Darwin'
| gpl-3.0 |
15Dkatz/pants | src/python/pants/option/ranked_value.py | 6 | 3858 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
class RankedValue(object):
"""An option value, together with a rank inferred from its source.
Allows us to control which source wins: e.g., a command-line flag overrides an environment
variable which overrides a config, etc. For example:
Consider this config:
[compile.java]
foo: 11
And this environment variable:
PANTS_COMPILE_FOO: 22
If the command-line is
./pants compile target
we expect the value of foo in the compile.java scope to be 22, because it was explicitly
set by the user in the enclosing compile scope. I.e., the outer scope's environment value
overrides the inner scope's config value.
However if the command-line is
./pants compile.java --foo=33 target
we now expect the value of foo in the compile.java to be 33. I.e., the inner scope's flag
overrides the outer scope's environment value.
To tell these cases apart we need to know the "ranking" of the value.
"""
# The ranked value sources. Higher ranks override lower ones.
NONE = 0 # The value None.
HARDCODED = 1 # The default provided at option registration.
CONFIG_DEFAULT = 2 # The value from the DEFAULT section of the config file.
CONFIG = 3 # The value from the relevant section of the config file.
ENVIRONMENT = 4 # The value from the appropriately-named environment variable.
FLAG = 5 # The value from the appropriately-named command-line flag.
_RANK_NAMES = {
NONE: 'NONE',
HARDCODED: 'HARDCODED',
CONFIG_DEFAULT: 'CONFIG_DEFAULT',
CONFIG: 'CONFIG',
ENVIRONMENT: 'ENVIRONMENT',
FLAG: 'FLAG'
}
@classmethod
def get_rank_name(cls, rank):
"""Returns the string name for the given rank integer.
:param int rank: the integer rank constant (E.g., RankedValue.HARDCODED).
:returns: the string name of the rank.
:rtype: string
"""
return cls._RANK_NAMES.get(rank, 'UNKNOWN')
@classmethod
def get_rank_value(cls, name):
"""Returns the integer constant value for the given rank name.
:param string rank: the string rank name (E.g., 'HARDCODED').
:returns: the integer constant value of the rank.
:rtype: int
"""
if name in cls._RANK_NAMES.values():
return getattr(cls, name, None)
return None
@classmethod
def get_names(cls):
"""Returns the list of rank names.
:returns: the rank names as a list (I.e., ['NONE', 'HARDCODED', 'CONFIG', ...])
:rtype: list
"""
return sorted(cls._RANK_NAMES.values(), key=cls.get_rank_value)
@classmethod
def prioritized_iter(cls, flag_val, env_val, config_val, config_default_val, hardcoded_val, default):
"""Yield the non-None values from highest-ranked to lowest, wrapped in RankedValue instances."""
if flag_val is not None:
yield RankedValue(cls.FLAG, flag_val)
if env_val is not None:
yield RankedValue(cls.ENVIRONMENT, env_val)
if config_val is not None:
yield RankedValue(cls.CONFIG, config_val)
if config_default_val is not None:
yield RankedValue(cls.CONFIG_DEFAULT, config_default_val)
if hardcoded_val is not None:
yield RankedValue(cls.HARDCODED, hardcoded_val)
yield RankedValue(cls.NONE, default)
def __init__(self, rank, value):
self._rank = rank
self._value = value
@property
def rank(self):
return self._rank
@property
def value(self):
return self._value
def __eq__(self, other):
return self._rank == other._rank and self._value == other._value
def __repr__(self):
return '({0}, {1})'.format(self.get_rank_name(self._rank), self._value)
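

# Illustrative usage sketch (not part of the module's public surface): the
# highest-ranked non-None value wins when iterating.
if __name__ == '__main__':
  winner = next(RankedValue.prioritized_iter(
      flag_val=None, env_val='22', config_val='11',
      config_default_val=None, hardcoded_val='0', default=None))
  print(winner)  # -> (ENVIRONMENT, 22)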
| apache-2.0 |
gangeli/NaturalLI | etc/aristo/turk_51_view-turker.py | 1 | 2410 | #!/usr/bin/env python
#
import argparse
import sys
import os
import csv
import time
import math
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../../lib/')
from naturalli import *
"""
Parse the command line arguments
"""
def parseargs():
parser = argparse.ArgumentParser(description=
'Convert the output of a turk task into a set of training examples for the classifier.')
parser.add_argument('--count', metavar='count', default=8,
type=int,
help='The number of queries per HIT. This should match the count in run-queries.py')
parser.add_argument('--turker', metavar='turker', required=True,
help='The turker to review.')
parser.add_argument('file', metavar='files', nargs='*',
help='The turker files to grep through.')
return parser.parse_args()
def writeLine(truth, premise, conclusion):
print("%d\t%s\t%s\t%s" % (0, 'True' if truth else 'False', premise, conclusion))
"""
Process a given turk file, writing an output in the Classifier data format.
@param f The file to process
@param count The number of queries in a HIT. This should match the value
used when creating the HIT.
@param turker The Turker id whose answers should be printed.
"""
def process(f, count, turker):
reader = csv.DictReader(f, delimiter=',', quotechar='"')
for row in reader:
if row['WorkerId'] == turker:
for i in range(count):
premise = row['Input.premise_' + str(i)]
conclusion = row['Input.hypothesis_' + str(i)]
answer = row['Answer.ex' + str(i)]
if answer == 'Forward':
print("%s\n => %s" % (premise, conclusion))
        elif answer == 'Backward':
          print("%s\n <= %s" % (premise, conclusion))
        elif answer == 'Both':
          print("%s\n <=> %s" % (premise, conclusion))
elif answer == 'Neither':
print("%s\n </> %s" % (premise, conclusion))
else:
raise Exception("Unknown answer: %s" % answer)
"""
The entry point of the program.
"""
if __name__ == "__main__":
# Parse the arguments
opts = parseargs()
for filename in opts.file:
with open(filename, 'r') as f:
process(f, opts.count, opts.turker)
| mit |
rednach/krill | test/shinken_modules.py | 13 | 9693 | #!/usr/bin/env python
import os
import re
import copy
import time
import subprocess
import shutil
import datetime  # not used here but "sub"-imported by the livestatus test (to be corrected)
import sys  # not used here but "sub"-imported by the livestatus test (to be corrected)
#
from shinken.modulesctx import modulesctx
from shinken.objects.module import Module
from shinken.modulesmanager import ModulesManager
from shinken.misc.datamanager import datamgr
from shinken.log import logger
#
from shinken_test import (
modules_dir,
ShinkenTest,
    time_hacker,  # not used here but "sub"-imported by the livestatus test (to be corrected)
)
modulesctx.set_modulesdir(modules_dir)
# Special Livestatus module opening since the module rename
#from shinken.modules.livestatus import module as livestatus_broker
livestatus_broker = modulesctx.get_module('livestatus')
LiveStatus_broker = livestatus_broker.LiveStatus_broker
LiveStatus = livestatus_broker.LiveStatus
LiveStatusRegenerator = livestatus_broker.LiveStatusRegenerator
LiveStatusQueryCache = livestatus_broker.LiveStatusQueryCache
LiveStatusClientThread = livestatus_broker.LiveStatusClientThread
Logline = livestatus_broker.Logline
LiveStatusLogStoreMongoDB = modulesctx.get_module('logstore-mongodb').LiveStatusLogStoreMongoDB
LiveStatusLogStoreSqlite = modulesctx.get_module('logstore-sqlite').LiveStatusLogStoreSqlite
livestatus_modconf = Module()
livestatus_modconf.module_name = "livestatus"
livestatus_modconf.module_type = livestatus_broker.properties['type']
livestatus_modconf.properties = livestatus_broker.properties.copy()
class ShinkenModulesTest(ShinkenTest):
def do_load_modules(self):
self.modules_manager.load_and_init()
self.log.log("I correctly loaded the modules: [%s]" % (','.join([inst.get_name() for inst in self.modules_manager.instances])))
def update_broker(self, dodeepcopy=False):
# The brok should be manage in the good order
ids = self.sched.brokers['Default-Broker']['broks'].keys()
ids.sort()
for brok_id in ids:
brok = self.sched.brokers['Default-Broker']['broks'][brok_id]
#print "Managing a brok type", brok.type, "of id", brok_id
#if brok.type == 'update_service_status':
# print "Problem?", brok.data['is_problem']
if dodeepcopy:
brok = copy.deepcopy(brok)
brok.prepare()
self.livestatus_broker.manage_brok(brok)
self.sched.brokers['Default-Broker']['broks'] = {}
def init_livestatus(self, modconf=None, dbmodconf=None, needcache=False):
self.livelogs = 'tmp/livelogs.db' + self.testid
if modconf is None:
modconf = Module({'module_name': 'LiveStatus',
'module_type': 'livestatus',
'port': str(50000 + os.getpid()),
'pnp_path': 'tmp/pnp4nagios_test' + self.testid,
'host': '127.0.0.1',
'socket': 'live',
'name': 'test', #?
})
if dbmodconf is None:
dbmodconf = Module({'module_name': 'LogStore',
'module_type': 'logstore_sqlite',
'use_aggressive_sql': "0",
'database_file': self.livelogs,
'archive_path': os.path.join(os.path.dirname(self.livelogs), 'archives'),
})
modconf.modules = [dbmodconf]
self.livestatus_broker = LiveStatus_broker(modconf)
self.livestatus_broker.create_queues()
#--- livestatus_broker.main
self.livestatus_broker.log = logger
# this seems to damage the logger so that the scheduler can't use it
#self.livestatus_broker.log.load_obj(self.livestatus_broker)
self.livestatus_broker.debug_output = []
self.livestatus_broker.modules_manager = ModulesManager('livestatus', modules_dir, [])
self.livestatus_broker.modules_manager.set_modules(self.livestatus_broker.modules)
        # We can now output some previously silenced debug output
self.livestatus_broker.do_load_modules()
for inst in self.livestatus_broker.modules_manager.instances:
if inst.properties["type"].startswith('logstore'):
f = getattr(inst, 'load', None)
if f and callable(f):
f(self.livestatus_broker) # !!! NOT self here !!!!
break
for s in self.livestatus_broker.debug_output:
print "errors during load", s
del self.livestatus_broker.debug_output
self.livestatus_broker.rg = LiveStatusRegenerator()
self.livestatus_broker.datamgr = datamgr
datamgr.load(self.livestatus_broker.rg)
self.livestatus_broker.query_cache = LiveStatusQueryCache()
if not needcache:
self.livestatus_broker.query_cache.disable()
self.livestatus_broker.rg.register_cache(self.livestatus_broker.query_cache)
#--- livestatus_broker.main
self.livestatus_broker.init()
self.livestatus_broker.db = self.livestatus_broker.modules_manager.instances[0]
self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.datamgr, self.livestatus_broker.query_cache, self.livestatus_broker.db, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)
#--- livestatus_broker.do_main
self.livestatus_broker.db.open()
if hasattr(self.livestatus_broker.db, 'prepare_log_db_table'):
self.livestatus_broker.db.prepare_log_db_table()
#--- livestatus_broker.do_main
class TestConfig(ShinkenModulesTest):
def tearDown(self):
self.livestatus_broker.db.close()
if os.path.exists(self.livelogs):
os.remove(self.livelogs)
if os.path.exists(self.livelogs + "-journal"):
os.remove(self.livelogs + "-journal")
if os.path.exists(self.livestatus_broker.pnp_path):
shutil.rmtree(self.livestatus_broker.pnp_path)
if os.path.exists('var/shinken.log'):
os.remove('var/shinken.log')
if os.path.exists('var/retention.dat'):
os.remove('var/retention.dat')
if os.path.exists('var/status.dat'):
os.remove('var/status.dat')
self.livestatus_broker = None
def contains_line(self, text, pattern):
regex = re.compile(pattern)
for line in text.splitlines():
if re.search(regex, line):
return True
return False
def update_broker(self, dodeepcopy=False):
# The brok should be manage in the good order
ids = self.sched.brokers['Default-Broker']['broks'].keys()
ids.sort()
for brok_id in ids:
brok = self.sched.brokers['Default-Broker']['broks'][brok_id]
#print "Managing a brok type", brok.type, "of id", brok_id
#if brok.type == 'update_service_status':
# print "Problem?", brok.data['is_problem']
if dodeepcopy:
brok = copy.deepcopy(brok)
brok.prepare()
self.livestatus_broker.manage_brok(brok)
self.sched.brokers['Default-Broker']['broks'] = {}
def lines_equal(self, text1, text2):
# gets two multiline strings and compares the contents
# lifestatus output may not be in alphabetical order, so this
# function is used to compare unordered output with unordered
# expected output
# sometimes mklivestatus returns 0 or 1 on an empty result
text1 = text1.replace("200 1", "200 0")
text2 = text2.replace("200 1", "200 0")
text1 = text1.rstrip()
text2 = text2.rstrip()
#print "text1 //%s//" % text1
#print "text2 //%s//" % text2
sorted1 = "\n".join(sorted(text1.split("\n")))
sorted2 = "\n".join(sorted(text2.split("\n")))
len1 = len(text1.split("\n"))
len2 = len(text2.split("\n"))
#print "%s == %s text cmp %s" % (len1, len2, sorted1 == sorted2)
#print "text1 //%s//" % sorted(text1.split("\n"))
#print "text2 //%s//" % sorted(text2.split("\n"))
if sorted1 == sorted2 and len1 == len2:
return True
else:
# Maybe list members are different
# allhosts;test_host_0;test_ok_0;servicegroup_02,servicegroup_01,ok
# allhosts;test_host_0;test_ok_0;servicegroup_02,ok,servicegroup_01
# break it up to
# [['allhosts'], ['test_host_0'], ['test_ok_0'],
# ['ok', 'servicegroup_01', 'servicegroup_02']]
[line for line in sorted(text1.split("\n"))]
data1 = [[sorted(c.split(',')) for c in columns] for columns in [line.split(';') for line in sorted(text1.split("\n")) if line]]
data2 = [[sorted(c.split(',')) for c in columns] for columns in [line.split(';') for line in sorted(text2.split("\n")) if line]]
#print "text1 //%s//" % data1
#print "text2 //%s//" % data2
# cmp is clever enough to handle nested arrays
return cmp(data1, data2) == 0
def show_broks(self, title):
print
print "--- ", title
for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
if re.compile('^service_').match(brok.type):
pass
#print "BROK:", brok.type
#print "BROK ", brok.data['in_checking']
self.update_broker()
request = 'GET services\nColumns: service_description is_executing\n'
response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
print response
| agpl-3.0 |
eLBati/stock-logistics-workflow | __unported__/stock_cancel/stock.py | 7 | 3434 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2012 Andrea Cometa All Rights Reserved.
# www.andreacometa.it
# [email protected]
# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
from openerp.tools.translate import _
import netsvc
class stock_picking(orm.Model):
_inherit = 'stock.picking'
def has_valuation_moves(self, cr, uid, move):
return self.pool.get('account.move').search(cr, uid, [
('ref', '=', move.picking_id.name),
])
def action_revert_done(self, cr, uid, ids, context=None):
if not len(ids):
return False
for picking in self.browse(cr, uid, ids, context):
for line in picking.move_lines:
if self.has_valuation_moves(cr, uid, line):
raise orm.except_orm(
_('Error'),
_('Line %s has valuation moves (%s). \
Remove them first') % (line.name,
line.picking_id.name))
line.write({'state': 'draft'})
self.write(cr, uid, [picking.id], {'state': 'draft'})
if picking.invoice_state == 'invoiced' and not picking.invoice_id:
self.write(cr, uid, [picking.id],
{'invoice_state': '2binvoiced'})
wf_service = netsvc.LocalService("workflow")
# Deleting the existing instance of workflow
wf_service.trg_delete(uid, 'stock.picking', picking.id, cr)
wf_service.trg_create(uid, 'stock.picking', picking.id, cr)
for (id, name) in self.name_get(cr, uid, ids):
message = _(
"The stock picking '%s' has been set in draft state."
) % (name,)
self.log(cr, uid, id, message)
return True
class stock_picking_out(orm.Model):
_inherit = 'stock.picking.out'
def action_revert_done(self, cr, uid, ids, context=None):
# override in order to redirect to stock.picking object
return self.pool.get('stock.picking').action_revert_done(
cr, uid, ids, context=context)
class stock_picking_in(orm.Model):
_inherit = 'stock.picking.in'
def action_revert_done(self, cr, uid, ids, context=None):
# override in order to redirect to stock.picking object
return self.pool.get('stock.picking').action_revert_done(
cr, uid, ids, context=context)
| agpl-3.0 |
402231466/40223146 | pybean.py | 562 | 8617 | #coding: utf-8
import sqlite3
from pkg_resources import parse_version
__version__ = "0.2.1"
__author__ = "Mickael Desfrenes"
__email__ = "[email protected]"
# Yen 2013.04.08: changed Python 2's .next() to next() so the module also runs on Python 3
class SQLiteWriter(object):
"""
In frozen mode (the default), the writer will not alter db schema.
Just add frozen=False to enable column creation (or just add False
as second parameter):
query_writer = SQLiteWriter(":memory:", False)
"""
def __init__(self, db_path=":memory:", frozen=True):
self.db = sqlite3.connect(db_path)
self.db.isolation_level = None
self.db.row_factory = sqlite3.Row
self.frozen = frozen
self.cursor = self.db.cursor()
self.cursor.execute("PRAGMA foreign_keys=ON;")
self.cursor.execute('PRAGMA encoding = "UTF-8";')
self.cursor.execute('BEGIN;')
def __del__(self):
self.db.close()
def replace(self, bean):
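        # Upsert semantics: a bean without an id is INSERTed (and gets its id
        # assigned), a bean with an id is REPLACEd; unknown columns are
        # created on the fly unless the writer is frozen.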
keys = []
values = []
write_operation = "replace"
if "id" not in bean.__dict__:
write_operation = "insert"
keys.append("id")
values.append(None)
self.__create_table(bean.__class__.__name__)
columns = self.__get_columns(bean.__class__.__name__)
for key in bean.__dict__:
keys.append(key)
if key not in columns:
self.__create_column(bean.__class__.__name__, key,
type(bean.__dict__[key]))
values.append(bean.__dict__[key])
sql = write_operation + " into " + bean.__class__.__name__ + "("
sql += ",".join(keys) + ") values ("
sql += ",".join(["?" for i in keys]) + ")"
self.cursor.execute(sql, values)
if write_operation == "insert":
bean.id = self.cursor.lastrowid
return bean.id
def __create_column(self, table, column, sqltype):
if self.frozen:
return
if sqltype in [float, int, bool]:
sqltype = "NUMERIC"
else:
sqltype = "TEXT"
sql = "alter table " + table + " add " + column + " " + sqltype
self.cursor.execute(sql)
def __get_columns(self, table):
columns = []
if self.frozen:
return columns
self.cursor.execute("PRAGMA table_info(" + table + ")")
for row in self.cursor:
columns.append(row["name"])
return columns
def __create_table(self, table):
if self.frozen:
return
sql = "create table if not exists " + table + "(id INTEGER PRIMARY KEY AUTOINCREMENT)"
self.cursor.execute(sql)
def get_rows(self, table_name, sql = "1", replace = None):
if replace is None : replace = []
self.__create_table(table_name)
sql = "SELECT * FROM " + table_name + " WHERE " + sql
try:
self.cursor.execute(sql, replace)
for row in self.cursor:
yield row
except sqlite3.OperationalError:
return
def get_count(self, table_name, sql="1", replace = None):
if replace is None : replace = []
self.__create_table(table_name)
sql = "SELECT count(*) AS cnt FROM " + table_name + " WHERE " + sql
try:
self.cursor.execute(sql, replace)
except sqlite3.OperationalError:
return 0
for row in self.cursor:
return row["cnt"]
def delete(self, bean):
self.__create_table(bean.__class__.__name__)
sql = "delete from " + bean.__class__.__name__ + " where id=?"
self.cursor.execute(sql,[bean.id])
def link(self, bean_a, bean_b):
self.replace(bean_a)
self.replace(bean_b)
table_a = bean_a.__class__.__name__
table_b = bean_b.__class__.__name__
assoc_table = self.__create_assoc_table(table_a, table_b)
sql = "replace into " + assoc_table + "(" + table_a + "_id," + table_b
sql += "_id) values(?,?)"
self.cursor.execute(sql,
[bean_a.id, bean_b.id])
def unlink(self, bean_a, bean_b):
table_a = bean_a.__class__.__name__
table_b = bean_b.__class__.__name__
assoc_table = self.__create_assoc_table(table_a, table_b)
sql = "delete from " + assoc_table + " where " + table_a
sql += "_id=? and " + table_b + "_id=?"
self.cursor.execute(sql,
[bean_a.id, bean_b.id])
def get_linked_rows(self, bean, table_name):
bean_table = bean.__class__.__name__
assoc_table = self.__create_assoc_table(bean_table, table_name)
sql = "select t.* from " + table_name + " t inner join " + assoc_table
sql += " a on a." + table_name + "_id = t.id where a."
sql += bean_table + "_id=?"
self.cursor.execute(sql,[bean.id])
for row in self.cursor:
yield row
def __create_assoc_table(self, table_a, table_b):
assoc_table = "_".join(sorted([table_a, table_b]))
if not self.frozen:
sql = "create table if not exists " + assoc_table + "("
sql+= table_a + "_id NOT NULL REFERENCES " + table_a + "(id) ON DELETE cascade,"
sql+= table_b + "_id NOT NULL REFERENCES " + table_b + "(id) ON DELETE cascade,"
sql+= " PRIMARY KEY (" + table_a + "_id," + table_b + "_id));"
self.cursor.execute(sql)
# no real support for foreign keys until sqlite3 v3.6.19
# so here's the hack
            if parse_version(sqlite3.sqlite_version) < parse_version("3.6.19"):
sql = "create trigger if not exists fk_" + table_a + "_" + assoc_table
sql+= " before delete on " + table_a
sql+= " for each row begin delete from " + assoc_table + " where " + table_a + "_id = OLD.id;end;"
self.cursor.execute(sql)
sql = "create trigger if not exists fk_" + table_b + "_" + assoc_table
sql+= " before delete on " + table_b
sql+= " for each row begin delete from " + assoc_table + " where " + table_b + "_id = OLD.id;end;"
self.cursor.execute(sql)
return assoc_table
def delete_all(self, table_name, sql = "1", replace = None):
if replace is None : replace = []
self.__create_table(table_name)
sql = "DELETE FROM " + table_name + " WHERE " + sql
try:
self.cursor.execute(sql, replace)
return True
except sqlite3.OperationalError:
return False
def commit(self):
self.db.commit()
class Store(object):
"""
A SQL writer should be passed to the constructor:
    beans_save = Store(SQLiteWriter(":memory:", frozen=False))
"""
def __init__(self, SQLWriter):
self.writer = SQLWriter
def new(self, table_name):
new_object = type(table_name,(object,),{})()
return new_object
def save(self, bean):
self.writer.replace(bean)
def load(self, table_name, id):
for row in self.writer.get_rows(table_name, "id=?", [id]):
return self.row_to_object(table_name, row)
def count(self, table_name, sql = "1", replace=None):
return self.writer.get_count(table_name, sql, replace if replace is not None else [])
def find(self, table_name, sql = "1", replace=None):
for row in self.writer.get_rows(table_name, sql, replace if replace is not None else []):
yield self.row_to_object(table_name, row)
def find_one(self, table_name, sql = "1", replace=None):
try:
return next(self.find(table_name, sql, replace))
except StopIteration:
return None
def delete(self, bean):
self.writer.delete(bean)
def link(self, bean_a, bean_b):
self.writer.link(bean_a, bean_b)
def unlink(self, bean_a, bean_b):
self.writer.unlink(bean_a, bean_b)
def get_linked(self, bean, table_name):
for row in self.writer.get_linked_rows(bean, table_name):
yield self.row_to_object(table_name, row)
def delete_all(self, table_name, sql = "1", replace=None):
return self.writer.delete_all(table_name, sql, replace if replace is not None else [])
def row_to_object(self, table_name, row):
new_object = type(table_name,(object,),{})()
for key in row.keys():
new_object.__dict__[key] = row[key]
return new_object
def commit(self):
self.writer.commit()
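
if __name__ == "__main__":
    # Illustrative usage sketch (assumes an in-memory, unfrozen store):
    store = Store(SQLiteWriter(":memory:", frozen=False))
    book = store.new("book")
    book.title = "pybean demo"
    store.save(book)
    store.commit()
    print(store.count("book"))  # -> 1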
| gpl-3.0 |
Johnzero/OE7 | openerp/addons-modules/event_sale/event_sale.py | 30 | 4724 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class product(osv.osv):
_inherit = 'product.product'
_columns = {
'event_ok': fields.boolean('Event Subscription', help='Determine if a product needs to create automatically an event registration at the confirmation of a sales order line.'),
'event_type_id': fields.many2one('event.type', 'Type of Event', help='Select event types so when we use this product in sales order lines, it will filter events of this type only.'),
}
def onchange_event_ok(self, cr, uid, ids, event_ok, context=None):
return {'value': {'type': event_ok and 'service' or False}}
product()
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
_columns = {
'event_id': fields.many2one('event.event', 'Event', help="Choose an event and it will automatically create a registration for this event."),
#those 2 fields are used for dynamic domains and filled by onchange
'event_type_id': fields.related('product_id','event_type_id', type='many2one', relation="event.type", string="Event Type"),
'event_ok': fields.related('product_id', 'event_ok', string='event_ok', type='boolean'),
}
def product_id_change(self, cr, uid, ids,
pricelist,
product, qty=0,
uom=False,
qty_uos=0,
uos=False,
name='',
partner_id=False,
lang=False,
update_tax=True,
date_order=False,
packaging=False,
fiscal_position=False,
flag=False, context=None):
"""
        Check whether the selected product is an event-type product.
"""
res = super(sale_order_line,self).product_id_change(cr, uid, ids, pricelist, product, qty=qty, uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id, lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
if product:
product_res = self.pool.get('product.product').browse(cr, uid, product, context=context)
if product_res.event_ok:
res['value'].update({'event_type_id': product_res.event_type_id.id, 'event_ok':product_res.event_ok})
return res
def button_confirm(self, cr, uid, ids, context=None):
'''
        Create an event registration when the sales order is confirmed.
'''
registration_obj = self.pool.get('event.registration')
sale_obj = self.pool.get('sale.order')
for order_line in self.browse(cr, uid, ids, context=context):
if order_line.event_id.id:
dic = {
'name': order_line.order_id.partner_invoice_id.name,
'partner_id': order_line.order_id.partner_id.id,
'nb_register': int(order_line.product_uom_qty),
'email': order_line.order_id.partner_id.email,
'phone': order_line.order_id.partner_id.phone,
'origin': order_line.order_id.name,
'event_id': order_line.event_id.id,
}
registration_id = registration_obj.create(cr, uid, dic, context=context)
message = _("The registration %s has been created from the Sales Order %s.") % (registration_id, order_line.order_id.name)
registration_obj.message_post(cr, uid, [registration_id], body=message, context=context)
return super(sale_order_line, self).button_confirm(cr, uid, ids, context=context)
| agpl-3.0 |
laperry1/android_external_chromium_org | build/android/pylib/base/test_dispatcher.py | 32 | 14970 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dispatches tests, either sharding or replicating them.
Performs the following steps:
* Create a test collection factory, using the given tests
- If sharding: test collection factory returns the same shared test collection
to all test runners
  - If replicating: test collection factory returns a unique test collection to
each test runner, with the same set of tests in each.
* Create a test runner for each device.
* Run each test runner in its own thread, grabbing tests from the test
collection until there are no tests left.
"""
import logging
import threading
from pylib import android_commands
from pylib import constants
from pylib.base import base_test_result
from pylib.device import device_errors
from pylib.utils import reraiser_thread
from pylib.utils import watchdog_timer
DEFAULT_TIMEOUT = 7 * 60 # seven minutes
class _ThreadSafeCounter(object):
"""A threadsafe counter."""
def __init__(self):
self._lock = threading.Lock()
self._value = 0
def GetAndIncrement(self):
"""Get the current value and increment it atomically.
Returns:
The value before incrementing.
"""
with self._lock:
pre_increment = self._value
self._value += 1
return pre_increment
class _Test(object):
"""Holds a test with additional metadata."""
def __init__(self, test, tries=0):
"""Initializes the _Test object.
Args:
test: The test.
tries: Number of tries so far.
"""
self.test = test
self.tries = tries
class _TestCollection(object):
"""A threadsafe collection of tests.
Args:
tests: List of tests to put in the collection.
"""
def __init__(self, tests=None):
if not tests:
tests = []
self._lock = threading.Lock()
self._tests = []
self._tests_in_progress = 0
# Used to signal that an item is available or all items have been handled.
self._item_available_or_all_done = threading.Event()
for t in tests:
self.add(t)
def _pop(self):
"""Pop a test from the collection.
Waits until a test is available or all tests have been handled.
Returns:
A test or None if all tests have been handled.
"""
while True:
# Wait for a test to be available or all tests to have been handled.
self._item_available_or_all_done.wait()
with self._lock:
# Check which of the two conditions triggered the signal.
if self._tests_in_progress == 0:
return None
try:
return self._tests.pop(0)
except IndexError:
# Another thread beat us to the available test, wait again.
self._item_available_or_all_done.clear()
def add(self, test):
"""Add an test to the collection.
Args:
test: A test to add.
"""
with self._lock:
self._tests.append(test)
self._item_available_or_all_done.set()
self._tests_in_progress += 1
def test_completed(self):
"""Indicate that a test has been fully handled."""
with self._lock:
self._tests_in_progress -= 1
if self._tests_in_progress == 0:
# All tests have been handled, signal all waiting threads.
self._item_available_or_all_done.set()
def __iter__(self):
"""Iterate through tests in the collection until all have been handled."""
while True:
r = self._pop()
if r is None:
break
yield r
def __len__(self):
"""Return the number of tests currently in the collection."""
return len(self._tests)
def test_names(self):
"""Return a list of the names of the tests currently in the collection."""
with self._lock:
return list(t.test for t in self._tests)
def _RunTestsFromQueue(runner, test_collection, out_results, watcher,
num_retries, tag_results_with_device=False):
"""Runs tests from the test_collection until empty using the given runner.
Adds TestRunResults objects to the out_results list and may add tests to the
out_retry list.
Args:
runner: A TestRunner object used to run the tests.
test_collection: A _TestCollection from which to get _Test objects to run.
out_results: A list to add TestRunResults to.
watcher: A watchdog_timer.WatchdogTimer object, used as a shared timeout.
num_retries: Number of retries for a test.
tag_results_with_device: If True, appends the name of the device on which
the test was run to the test name. Used when replicating to identify
which device ran each copy of the test, and to ensure each copy of the
test is recorded separately.
"""
def TagTestRunResults(test_run_results):
"""Tags all results with the last 4 digits of the device id.
Used when replicating tests to distinguish the same tests run on different
devices. We use a set to store test results, so the hash (generated from
name and tag) must be unique to be considered different results.
"""
new_test_run_results = base_test_result.TestRunResults()
for test_result in test_run_results.GetAll():
test_result.SetName('%s_%s' % (runner.device_serial[-4:],
test_result.GetName()))
new_test_run_results.AddResult(test_result)
return new_test_run_results
for test in test_collection:
watcher.Reset()
try:
if runner.device_serial not in android_commands.GetAttachedDevices():
# Device is unresponsive, stop handling tests on this device.
msg = 'Device %s is unresponsive.' % runner.device_serial
logging.warning(msg)
raise device_errors.DeviceUnreachableError(msg)
result, retry = runner.RunTest(test.test)
if tag_results_with_device:
result = TagTestRunResults(result)
test.tries += 1
if retry and test.tries <= num_retries:
# Retry non-passing results, only record passing results.
pass_results = base_test_result.TestRunResults()
pass_results.AddResults(result.GetPass())
out_results.append(pass_results)
logging.warning('Will retry test, try #%s.' % test.tries)
test_collection.add(_Test(test=retry, tries=test.tries))
else:
# All tests passed or retry limit reached. Either way, record results.
out_results.append(result)
except:
# An unhandleable exception, ensure tests get run by another device and
# reraise this exception on the main thread.
test_collection.add(test)
raise
finally:
# Retries count as separate tasks so always mark the popped test as done.
test_collection.test_completed()
def _SetUp(runner_factory, device, out_runners, threadsafe_counter):
"""Creates a test runner for each device and calls SetUp() in parallel.
Note: if a device is unresponsive the corresponding TestRunner will not be
added to out_runners.
Args:
runner_factory: Callable that takes a device and index and returns a
TestRunner object.
device: The device serial number to set up.
out_runners: List to add the successfully set up TestRunner object.
threadsafe_counter: A _ThreadSafeCounter object used to get shard indices.
"""
try:
index = threadsafe_counter.GetAndIncrement()
logging.warning('Creating shard %s for device %s.', index, device)
runner = runner_factory(device, index)
runner.SetUp()
out_runners.append(runner)
except (device_errors.DeviceUnreachableError,
# TODO(jbudorick) Remove this once the underlying implementations
# for the above are switched or wrapped.
android_commands.errors.DeviceUnresponsiveError) as e:
logging.warning('Failed to create shard for %s: [%s]', device, e)
def _RunAllTests(runners, test_collection_factory, num_retries, timeout=None,
tag_results_with_device=False):
"""Run all tests using the given TestRunners.
Args:
runners: A list of TestRunner objects.
test_collection_factory: A callable to generate a _TestCollection object for
each test runner.
num_retries: Number of retries for a test.
timeout: Watchdog timeout in seconds.
tag_results_with_device: If True, appends the name of the device on which
the test was run to the test name. Used when replicating to identify
which device ran each copy of the test, and to ensure each copy of the
test is recorded separately.
Returns:
A tuple of (TestRunResults object, exit code)
"""
logging.warning('Running tests with %s test runners.' % (len(runners)))
results = []
exit_code = 0
run_results = base_test_result.TestRunResults()
watcher = watchdog_timer.WatchdogTimer(timeout)
test_collections = [test_collection_factory() for _ in runners]
threads = [
reraiser_thread.ReraiserThread(
_RunTestsFromQueue,
[r, tc, results, watcher, num_retries, tag_results_with_device],
name=r.device_serial[-4:])
for r, tc in zip(runners, test_collections)]
workers = reraiser_thread.ReraiserThreadGroup(threads)
workers.StartAll()
# Catch DeviceUnreachableErrors and set a warning exit code
try:
workers.JoinAll(watcher)
except (device_errors.DeviceUnreachableError,
# TODO(jbudorick) Remove this once the underlying implementations
# for the above are switched or wrapped.
android_commands.errors.DeviceUnresponsiveError) as e:
logging.error(e)
exit_code = constants.WARNING_EXIT_CODE
if not all((len(tc) == 0 for tc in test_collections)):
logging.error('Only ran %d tests (all devices are likely offline).' %
len(results))
for tc in test_collections:
run_results.AddResults(base_test_result.BaseTestResult(
t, base_test_result.ResultType.UNKNOWN) for t in tc.test_names())
for r in results:
run_results.AddTestRunResults(r)
if not run_results.DidRunPass():
exit_code = constants.ERROR_EXIT_CODE
return (run_results, exit_code)
def _CreateRunners(runner_factory, devices, timeout=None):
"""Creates a test runner for each device and calls SetUp() in parallel.
Note: if a device is unresponsive the corresponding TestRunner will not be
included in the returned list.
Args:
runner_factory: Callable that takes a device and index and returns a
TestRunner object.
devices: List of device serial numbers as strings.
timeout: Watchdog timeout in seconds, defaults to the default timeout.
Returns:
A list of TestRunner objects.
"""
logging.warning('Creating %s test runners.' % len(devices))
runners = []
counter = _ThreadSafeCounter()
threads = reraiser_thread.ReraiserThreadGroup(
[reraiser_thread.ReraiserThread(_SetUp,
[runner_factory, d, runners, counter],
name=d[-4:])
for d in devices])
threads.StartAll()
threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))
return runners
def _TearDownRunners(runners, timeout=None):
"""Calls TearDown() for each test runner in parallel.
Args:
runners: A list of TestRunner objects.
timeout: Watchdog timeout in seconds, defaults to the default timeout.
"""
threads = reraiser_thread.ReraiserThreadGroup(
[reraiser_thread.ReraiserThread(r.TearDown, name=r.device_serial[-4:])
for r in runners])
threads.StartAll()
threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))
def ApplyMaxPerRun(tests, max_per_run):
"""Rearrange the tests so that no group contains more than max_per_run tests.
Args:
    tests: List of test groups; string groups are colon-delimited test names.
    max_per_run: Maximum number of tests allowed in any one group. For
      example (illustrative), ApplyMaxPerRun(['a:b:c'], 2) yields
      ['a:b', 'c'].
Returns:
A list of tests with no more than max_per_run per run.
"""
tests_expanded = []
for test_group in tests:
    if not isinstance(test_group, str):
# Do not split test objects which are not strings.
tests_expanded.append(test_group)
else:
test_split = test_group.split(':')
for i in range(0, len(test_split), max_per_run):
tests_expanded.append(':'.join(test_split[i:i+max_per_run]))
return tests_expanded
def RunTests(tests, runner_factory, devices, shard=True,
test_timeout=DEFAULT_TIMEOUT, setup_timeout=DEFAULT_TIMEOUT,
num_retries=2, max_per_run=256):
"""Run all tests on attached devices, retrying tests that don't pass.
Args:
tests: List of tests to run.
runner_factory: Callable that takes a device and index and returns a
TestRunner object.
devices: List of attached devices.
shard: True if we should shard, False if we should replicate tests.
- Sharding tests will distribute tests across all test runners through a
shared test collection.
- Replicating tests will copy all tests to each test runner through a
unique test collection for each test runner.
test_timeout: Watchdog timeout in seconds for running tests.
setup_timeout: Watchdog timeout in seconds for creating and cleaning up
test runners.
num_retries: Number of retries for a test.
max_per_run: Maximum number of tests to run in any group.
Returns:
A tuple of (base_test_result.TestRunResults object, exit code).
"""
if not tests:
logging.critical('No tests to run.')
return (base_test_result.TestRunResults(), constants.ERROR_EXIT_CODE)
tests_expanded = ApplyMaxPerRun(tests, max_per_run)
if shard:
# Generate a shared _TestCollection object for all test runners, so they
# draw from a common pool of tests.
shared_test_collection = _TestCollection([_Test(t) for t in tests_expanded])
test_collection_factory = lambda: shared_test_collection
tag_results_with_device = False
log_string = 'sharded across devices'
else:
# Generate a unique _TestCollection object for each test runner, but use
# the same set of tests.
test_collection_factory = lambda: _TestCollection(
[_Test(t) for t in tests_expanded])
tag_results_with_device = True
log_string = 'replicated on each device'
logging.info('Will run %d tests (%s): %s',
len(tests_expanded), log_string, str(tests_expanded))
runners = _CreateRunners(runner_factory, devices, setup_timeout)
try:
return _RunAllTests(runners, test_collection_factory,
num_retries, test_timeout, tag_results_with_device)
finally:
try:
_TearDownRunners(runners, setup_timeout)
except (device_errors.DeviceUnreachableError,
# TODO(jbudorick) Remove this once the underlying implementations
# for the above are switched or wrapped.
android_commands.errors.DeviceUnresponsiveError) as e:
logging.warning('Device unresponsive during TearDown: [%s]', e)
except Exception as e:
logging.error('Unexpected exception caught during TearDown: %s' % str(e))
| bsd-3-clause |
Manojkumar91/odoo_inresto | addons/point_of_sale/report/account_statement.py | 8 | 1964 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class account_statement(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_statement, self).__init__(cr, uid, name, context=context)
self.total = 0.0
self.localcontext.update({
'time': time,
'get_total': self._get_total,
'get_data': self._get_data,
})
def _get_data(self, statement):
lines = []
for line in statement.line_ids:
lines.append(line)
return lines
def _get_total(self, statement_line_ids):
total = 0.0
for line in statement_line_ids:
total += line.amount
return total
class report_account_statement(osv.AbstractModel):
_name = 'report.point_of_sale.report_statement'
_inherit = 'report.abstract_report'
_template = 'point_of_sale.report_statement'
_wrapped_report_class = account_statement
| agpl-3.0 |
schlueter/ansible | lib/ansible/modules/notification/hall.py | 103 | 3365 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Billy Kimble <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: hall
short_description: Send notification to Hall
description:
- "The C(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notication messages to rooms."
version_added: "2.0"
author: Billy Kimble (@bkimble) <[email protected]>
options:
room_token:
description:
- "Room token provided to you by setting up the Ansible room integation on U(https://hall.com)"
required: true
msg:
description:
- The message you wish to deliver as a notification
required: true
title:
description:
- The title of the message
required: true
picture:
description:
- >
        The full URL to the image you wish to use for the icon of the message. Defaults to
U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)
required: false
"""
EXAMPLES = """
- name: Send Hall notification
  hall:
    room_token: <hall room integration token>
    title: Nginx
    msg: 'Created virtual host file on {{ inventory_hostname }}'
  delegate_to: localhost
- name: Send Hall notification if EC2 servers were created.
hall:
room_token: <hall room integration token>
title: Server Creation
msg: 'Created instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region.'
  delegate_to: localhost
when: ec2.instances|length > 0
with_items: '{{ ec2.instances }}'
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s'
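# e.g. HALL_API_ENDPOINT % 'abc123' -> 'https://hall.com/api/1/services/generic/abc123' (illustrative token)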
def send_request_to_hall(module, room_token, payload):
headers = {'Content-Type': 'application/json'}
payload = module.jsonify(payload)
api_endpoint = HALL_API_ENDPOINT % (room_token)
response, info = fetch_url(module, api_endpoint, data=payload, headers=headers)
if info['status'] != 200:
secure_url = HALL_API_ENDPOINT % ('[redacted]')
module.fail_json(msg=" failed to send %s to %s: %s" % (payload, secure_url, info['msg']))
def main():
module = AnsibleModule(
argument_spec=dict(
room_token=dict(type='str', required=True),
msg=dict(type='str', required=True),
title=dict(type='str', required=True),
picture=dict(type='str',
default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'),
)
)
room_token = module.params['room_token']
message = module.params['msg']
title = module.params['title']
picture = module.params['picture']
payload = {'title': title, 'message': message, 'picture': picture}
send_request_to_hall(module, room_token, payload)
module.exit_json(msg="OK")
if __name__ == '__main__':
main()
| gpl-3.0 |